Nekochu commited on
Commit
3cd73eb
·
verified ·
1 Parent(s): 6eaafef

Add overall stable checkpoint-25000 from "trained_model" - Model: myt5-large

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ byte_maps.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "<extra_id_0>": 259,
3
+ "<extra_id_100>": 359,
4
+ "<extra_id_101>": 360,
5
+ "<extra_id_102>": 361,
6
+ "<extra_id_103>": 362,
7
+ "<extra_id_104>": 363,
8
+ "<extra_id_105>": 364,
9
+ "<extra_id_106>": 365,
10
+ "<extra_id_107>": 366,
11
+ "<extra_id_108>": 367,
12
+ "<extra_id_109>": 368,
13
+ "<extra_id_10>": 269,
14
+ "<extra_id_110>": 369,
15
+ "<extra_id_111>": 370,
16
+ "<extra_id_112>": 371,
17
+ "<extra_id_113>": 372,
18
+ "<extra_id_114>": 373,
19
+ "<extra_id_115>": 374,
20
+ "<extra_id_116>": 375,
21
+ "<extra_id_117>": 376,
22
+ "<extra_id_118>": 377,
23
+ "<extra_id_119>": 378,
24
+ "<extra_id_11>": 270,
25
+ "<extra_id_120>": 379,
26
+ "<extra_id_121>": 380,
27
+ "<extra_id_122>": 381,
28
+ "<extra_id_123>": 382,
29
+ "<extra_id_124>": 383,
30
+ "<extra_id_12>": 271,
31
+ "<extra_id_13>": 272,
32
+ "<extra_id_14>": 273,
33
+ "<extra_id_15>": 274,
34
+ "<extra_id_16>": 275,
35
+ "<extra_id_17>": 276,
36
+ "<extra_id_18>": 277,
37
+ "<extra_id_19>": 278,
38
+ "<extra_id_1>": 260,
39
+ "<extra_id_20>": 279,
40
+ "<extra_id_21>": 280,
41
+ "<extra_id_22>": 281,
42
+ "<extra_id_23>": 282,
43
+ "<extra_id_24>": 283,
44
+ "<extra_id_25>": 284,
45
+ "<extra_id_26>": 285,
46
+ "<extra_id_27>": 286,
47
+ "<extra_id_28>": 287,
48
+ "<extra_id_29>": 288,
49
+ "<extra_id_2>": 261,
50
+ "<extra_id_30>": 289,
51
+ "<extra_id_31>": 290,
52
+ "<extra_id_32>": 291,
53
+ "<extra_id_33>": 292,
54
+ "<extra_id_34>": 293,
55
+ "<extra_id_35>": 294,
56
+ "<extra_id_36>": 295,
57
+ "<extra_id_37>": 296,
58
+ "<extra_id_38>": 297,
59
+ "<extra_id_39>": 298,
60
+ "<extra_id_3>": 262,
61
+ "<extra_id_40>": 299,
62
+ "<extra_id_41>": 300,
63
+ "<extra_id_42>": 301,
64
+ "<extra_id_43>": 302,
65
+ "<extra_id_44>": 303,
66
+ "<extra_id_45>": 304,
67
+ "<extra_id_46>": 305,
68
+ "<extra_id_47>": 306,
69
+ "<extra_id_48>": 307,
70
+ "<extra_id_49>": 308,
71
+ "<extra_id_4>": 263,
72
+ "<extra_id_50>": 309,
73
+ "<extra_id_51>": 310,
74
+ "<extra_id_52>": 311,
75
+ "<extra_id_53>": 312,
76
+ "<extra_id_54>": 313,
77
+ "<extra_id_55>": 314,
78
+ "<extra_id_56>": 315,
79
+ "<extra_id_57>": 316,
80
+ "<extra_id_58>": 317,
81
+ "<extra_id_59>": 318,
82
+ "<extra_id_5>": 264,
83
+ "<extra_id_60>": 319,
84
+ "<extra_id_61>": 320,
85
+ "<extra_id_62>": 321,
86
+ "<extra_id_63>": 322,
87
+ "<extra_id_64>": 323,
88
+ "<extra_id_65>": 324,
89
+ "<extra_id_66>": 325,
90
+ "<extra_id_67>": 326,
91
+ "<extra_id_68>": 327,
92
+ "<extra_id_69>": 328,
93
+ "<extra_id_6>": 265,
94
+ "<extra_id_70>": 329,
95
+ "<extra_id_71>": 330,
96
+ "<extra_id_72>": 331,
97
+ "<extra_id_73>": 332,
98
+ "<extra_id_74>": 333,
99
+ "<extra_id_75>": 334,
100
+ "<extra_id_76>": 335,
101
+ "<extra_id_77>": 336,
102
+ "<extra_id_78>": 337,
103
+ "<extra_id_79>": 338,
104
+ "<extra_id_7>": 266,
105
+ "<extra_id_80>": 339,
106
+ "<extra_id_81>": 340,
107
+ "<extra_id_82>": 341,
108
+ "<extra_id_83>": 342,
109
+ "<extra_id_84>": 343,
110
+ "<extra_id_85>": 344,
111
+ "<extra_id_86>": 345,
112
+ "<extra_id_87>": 346,
113
+ "<extra_id_88>": 347,
114
+ "<extra_id_89>": 348,
115
+ "<extra_id_8>": 267,
116
+ "<extra_id_90>": 349,
117
+ "<extra_id_91>": 350,
118
+ "<extra_id_92>": 351,
119
+ "<extra_id_93>": 352,
120
+ "<extra_id_94>": 353,
121
+ "<extra_id_95>": 354,
122
+ "<extra_id_96>": 355,
123
+ "<extra_id_97>": 356,
124
+ "<extra_id_98>": 357,
125
+ "<extra_id_99>": 358,
126
+ "<extra_id_9>": 268
127
+ }
byte_maps.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a2117fbbdacd95d9c5c1327bf096fd4048877f67aea7b68c07cbd77d8c68d003
3
+ size 11644649
config.json ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "T5ForConditionalGeneration"
4
+ ],
5
+ "classifier_dropout": 0.0,
6
+ "d_ff": 3840,
7
+ "d_kv": 64,
8
+ "d_model": 1536,
9
+ "decoder_start_token_id": 0,
10
+ "dense_act_fn": "gelu_new",
11
+ "dropout_rate": 0.0,
12
+ "eos_token_id": 1,
13
+ "feed_forward_proj": "gated-gelu",
14
+ "gradient_checkpointing": false,
15
+ "initializer_factor": 1.0,
16
+ "is_encoder_decoder": true,
17
+ "is_gated_act": true,
18
+ "layer_norm_epsilon": 1e-06,
19
+ "model_type": "t5",
20
+ "num_decoder_layers": 12,
21
+ "num_heads": 16,
22
+ "num_layers": 36,
23
+ "output_past": true,
24
+ "pad_token_id": 0,
25
+ "relative_attention_max_distance": 128,
26
+ "relative_attention_num_buckets": 32,
27
+ "tie_word_embeddings": false,
28
+ "tokenizer_class": "MyT5Tokenizer",
29
+ "torch_dtype": "float32",
30
+ "transformers_version": "4.52.2",
31
+ "use_cache": true,
32
+ "vocab_size": 384
33
+ }
generation_config.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "decoder_start_token_id": 0,
4
+ "eos_token_id": 1,
5
+ "pad_token_id": 0,
6
+ "transformers_version": "4.52.2"
7
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ddf6259a1c4169e48dc461f3d2c233e621eadb3c599afb58b22ae7e5202dd207
3
+ size 4912795416
optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:19666fc256ff6199564f54065299e4a22a9cb8942a87e4f8271dee5995201188
3
+ size 2500805707
rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0676bf75358ef5347eeb5b68e2f9bcd272783146837335dabe76f8adbfdb1957
3
+ size 14645
scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c50d7f913c4429bf52d23c2f0d4f1093de6d29339591bc8af8bbdb6935cabade
3
+ size 1465
special_tokens_map.json ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "<extra_id_0>",
4
+ "<extra_id_1>",
5
+ "<extra_id_2>",
6
+ "<extra_id_3>",
7
+ "<extra_id_4>",
8
+ "<extra_id_5>",
9
+ "<extra_id_6>",
10
+ "<extra_id_7>",
11
+ "<extra_id_8>",
12
+ "<extra_id_9>",
13
+ "<extra_id_10>",
14
+ "<extra_id_11>",
15
+ "<extra_id_12>",
16
+ "<extra_id_13>",
17
+ "<extra_id_14>",
18
+ "<extra_id_15>",
19
+ "<extra_id_16>",
20
+ "<extra_id_17>",
21
+ "<extra_id_18>",
22
+ "<extra_id_19>",
23
+ "<extra_id_20>",
24
+ "<extra_id_21>",
25
+ "<extra_id_22>",
26
+ "<extra_id_23>",
27
+ "<extra_id_24>",
28
+ "<extra_id_25>",
29
+ "<extra_id_26>",
30
+ "<extra_id_27>",
31
+ "<extra_id_28>",
32
+ "<extra_id_29>",
33
+ "<extra_id_30>",
34
+ "<extra_id_31>",
35
+ "<extra_id_32>",
36
+ "<extra_id_33>",
37
+ "<extra_id_34>",
38
+ "<extra_id_35>",
39
+ "<extra_id_36>",
40
+ "<extra_id_37>",
41
+ "<extra_id_38>",
42
+ "<extra_id_39>",
43
+ "<extra_id_40>",
44
+ "<extra_id_41>",
45
+ "<extra_id_42>",
46
+ "<extra_id_43>",
47
+ "<extra_id_44>",
48
+ "<extra_id_45>",
49
+ "<extra_id_46>",
50
+ "<extra_id_47>",
51
+ "<extra_id_48>",
52
+ "<extra_id_49>",
53
+ "<extra_id_50>",
54
+ "<extra_id_51>",
55
+ "<extra_id_52>",
56
+ "<extra_id_53>",
57
+ "<extra_id_54>",
58
+ "<extra_id_55>",
59
+ "<extra_id_56>",
60
+ "<extra_id_57>",
61
+ "<extra_id_58>",
62
+ "<extra_id_59>",
63
+ "<extra_id_60>",
64
+ "<extra_id_61>",
65
+ "<extra_id_62>",
66
+ "<extra_id_63>",
67
+ "<extra_id_64>",
68
+ "<extra_id_65>",
69
+ "<extra_id_66>",
70
+ "<extra_id_67>",
71
+ "<extra_id_68>",
72
+ "<extra_id_69>",
73
+ "<extra_id_70>",
74
+ "<extra_id_71>",
75
+ "<extra_id_72>",
76
+ "<extra_id_73>",
77
+ "<extra_id_74>",
78
+ "<extra_id_75>",
79
+ "<extra_id_76>",
80
+ "<extra_id_77>",
81
+ "<extra_id_78>",
82
+ "<extra_id_79>",
83
+ "<extra_id_80>",
84
+ "<extra_id_81>",
85
+ "<extra_id_82>",
86
+ "<extra_id_83>",
87
+ "<extra_id_84>",
88
+ "<extra_id_85>",
89
+ "<extra_id_86>",
90
+ "<extra_id_87>",
91
+ "<extra_id_88>",
92
+ "<extra_id_89>",
93
+ "<extra_id_90>",
94
+ "<extra_id_91>",
95
+ "<extra_id_92>",
96
+ "<extra_id_93>",
97
+ "<extra_id_94>",
98
+ "<extra_id_95>",
99
+ "<extra_id_96>",
100
+ "<extra_id_97>",
101
+ "<extra_id_98>",
102
+ "<extra_id_99>",
103
+ "<extra_id_100>",
104
+ "<extra_id_101>",
105
+ "<extra_id_102>",
106
+ "<extra_id_103>",
107
+ "<extra_id_104>",
108
+ "<extra_id_105>",
109
+ "<extra_id_106>",
110
+ "<extra_id_107>",
111
+ "<extra_id_108>",
112
+ "<extra_id_109>",
113
+ "<extra_id_110>",
114
+ "<extra_id_111>",
115
+ "<extra_id_112>",
116
+ "<extra_id_113>",
117
+ "<extra_id_114>",
118
+ "<extra_id_115>",
119
+ "<extra_id_116>",
120
+ "<extra_id_117>",
121
+ "<extra_id_118>",
122
+ "<extra_id_119>",
123
+ "<extra_id_120>",
124
+ "<extra_id_121>",
125
+ "<extra_id_122>",
126
+ "<extra_id_123>",
127
+ "<extra_id_124>"
128
+ ],
129
+ "eos_token": {
130
+ "content": "</s>",
131
+ "lstrip": false,
132
+ "normalized": true,
133
+ "rstrip": false,
134
+ "single_word": false
135
+ },
136
+ "pad_token": {
137
+ "content": "<pad>",
138
+ "lstrip": false,
139
+ "normalized": true,
140
+ "rstrip": false,
141
+ "single_word": false
142
+ },
143
+ "unk_token": {
144
+ "content": "<unk>",
145
+ "lstrip": false,
146
+ "normalized": true,
147
+ "rstrip": false,
148
+ "single_word": false
149
+ }
150
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,1163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "<pad>",
5
+ "lstrip": false,
6
+ "normalized": true,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "</s>",
13
+ "lstrip": false,
14
+ "normalized": true,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "<unk>",
21
+ "lstrip": false,
22
+ "normalized": true,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "259": {
28
+ "content": "<extra_id_0>",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "260": {
36
+ "content": "<extra_id_1>",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ },
43
+ "261": {
44
+ "content": "<extra_id_2>",
45
+ "lstrip": false,
46
+ "normalized": false,
47
+ "rstrip": false,
48
+ "single_word": false,
49
+ "special": true
50
+ },
51
+ "262": {
52
+ "content": "<extra_id_3>",
53
+ "lstrip": false,
54
+ "normalized": false,
55
+ "rstrip": false,
56
+ "single_word": false,
57
+ "special": true
58
+ },
59
+ "263": {
60
+ "content": "<extra_id_4>",
61
+ "lstrip": false,
62
+ "normalized": false,
63
+ "rstrip": false,
64
+ "single_word": false,
65
+ "special": true
66
+ },
67
+ "264": {
68
+ "content": "<extra_id_5>",
69
+ "lstrip": false,
70
+ "normalized": false,
71
+ "rstrip": false,
72
+ "single_word": false,
73
+ "special": true
74
+ },
75
+ "265": {
76
+ "content": "<extra_id_6>",
77
+ "lstrip": false,
78
+ "normalized": false,
79
+ "rstrip": false,
80
+ "single_word": false,
81
+ "special": true
82
+ },
83
+ "266": {
84
+ "content": "<extra_id_7>",
85
+ "lstrip": false,
86
+ "normalized": false,
87
+ "rstrip": false,
88
+ "single_word": false,
89
+ "special": true
90
+ },
91
+ "267": {
92
+ "content": "<extra_id_8>",
93
+ "lstrip": false,
94
+ "normalized": false,
95
+ "rstrip": false,
96
+ "single_word": false,
97
+ "special": true
98
+ },
99
+ "268": {
100
+ "content": "<extra_id_9>",
101
+ "lstrip": false,
102
+ "normalized": false,
103
+ "rstrip": false,
104
+ "single_word": false,
105
+ "special": true
106
+ },
107
+ "269": {
108
+ "content": "<extra_id_10>",
109
+ "lstrip": false,
110
+ "normalized": false,
111
+ "rstrip": false,
112
+ "single_word": false,
113
+ "special": true
114
+ },
115
+ "270": {
116
+ "content": "<extra_id_11>",
117
+ "lstrip": false,
118
+ "normalized": false,
119
+ "rstrip": false,
120
+ "single_word": false,
121
+ "special": true
122
+ },
123
+ "271": {
124
+ "content": "<extra_id_12>",
125
+ "lstrip": false,
126
+ "normalized": false,
127
+ "rstrip": false,
128
+ "single_word": false,
129
+ "special": true
130
+ },
131
+ "272": {
132
+ "content": "<extra_id_13>",
133
+ "lstrip": false,
134
+ "normalized": false,
135
+ "rstrip": false,
136
+ "single_word": false,
137
+ "special": true
138
+ },
139
+ "273": {
140
+ "content": "<extra_id_14>",
141
+ "lstrip": false,
142
+ "normalized": false,
143
+ "rstrip": false,
144
+ "single_word": false,
145
+ "special": true
146
+ },
147
+ "274": {
148
+ "content": "<extra_id_15>",
149
+ "lstrip": false,
150
+ "normalized": false,
151
+ "rstrip": false,
152
+ "single_word": false,
153
+ "special": true
154
+ },
155
+ "275": {
156
+ "content": "<extra_id_16>",
157
+ "lstrip": false,
158
+ "normalized": false,
159
+ "rstrip": false,
160
+ "single_word": false,
161
+ "special": true
162
+ },
163
+ "276": {
164
+ "content": "<extra_id_17>",
165
+ "lstrip": false,
166
+ "normalized": false,
167
+ "rstrip": false,
168
+ "single_word": false,
169
+ "special": true
170
+ },
171
+ "277": {
172
+ "content": "<extra_id_18>",
173
+ "lstrip": false,
174
+ "normalized": false,
175
+ "rstrip": false,
176
+ "single_word": false,
177
+ "special": true
178
+ },
179
+ "278": {
180
+ "content": "<extra_id_19>",
181
+ "lstrip": false,
182
+ "normalized": false,
183
+ "rstrip": false,
184
+ "single_word": false,
185
+ "special": true
186
+ },
187
+ "279": {
188
+ "content": "<extra_id_20>",
189
+ "lstrip": false,
190
+ "normalized": false,
191
+ "rstrip": false,
192
+ "single_word": false,
193
+ "special": true
194
+ },
195
+ "280": {
196
+ "content": "<extra_id_21>",
197
+ "lstrip": false,
198
+ "normalized": false,
199
+ "rstrip": false,
200
+ "single_word": false,
201
+ "special": true
202
+ },
203
+ "281": {
204
+ "content": "<extra_id_22>",
205
+ "lstrip": false,
206
+ "normalized": false,
207
+ "rstrip": false,
208
+ "single_word": false,
209
+ "special": true
210
+ },
211
+ "282": {
212
+ "content": "<extra_id_23>",
213
+ "lstrip": false,
214
+ "normalized": false,
215
+ "rstrip": false,
216
+ "single_word": false,
217
+ "special": true
218
+ },
219
+ "283": {
220
+ "content": "<extra_id_24>",
221
+ "lstrip": false,
222
+ "normalized": false,
223
+ "rstrip": false,
224
+ "single_word": false,
225
+ "special": true
226
+ },
227
+ "284": {
228
+ "content": "<extra_id_25>",
229
+ "lstrip": false,
230
+ "normalized": false,
231
+ "rstrip": false,
232
+ "single_word": false,
233
+ "special": true
234
+ },
235
+ "285": {
236
+ "content": "<extra_id_26>",
237
+ "lstrip": false,
238
+ "normalized": false,
239
+ "rstrip": false,
240
+ "single_word": false,
241
+ "special": true
242
+ },
243
+ "286": {
244
+ "content": "<extra_id_27>",
245
+ "lstrip": false,
246
+ "normalized": false,
247
+ "rstrip": false,
248
+ "single_word": false,
249
+ "special": true
250
+ },
251
+ "287": {
252
+ "content": "<extra_id_28>",
253
+ "lstrip": false,
254
+ "normalized": false,
255
+ "rstrip": false,
256
+ "single_word": false,
257
+ "special": true
258
+ },
259
+ "288": {
260
+ "content": "<extra_id_29>",
261
+ "lstrip": false,
262
+ "normalized": false,
263
+ "rstrip": false,
264
+ "single_word": false,
265
+ "special": true
266
+ },
267
+ "289": {
268
+ "content": "<extra_id_30>",
269
+ "lstrip": false,
270
+ "normalized": false,
271
+ "rstrip": false,
272
+ "single_word": false,
273
+ "special": true
274
+ },
275
+ "290": {
276
+ "content": "<extra_id_31>",
277
+ "lstrip": false,
278
+ "normalized": false,
279
+ "rstrip": false,
280
+ "single_word": false,
281
+ "special": true
282
+ },
283
+ "291": {
284
+ "content": "<extra_id_32>",
285
+ "lstrip": false,
286
+ "normalized": false,
287
+ "rstrip": false,
288
+ "single_word": false,
289
+ "special": true
290
+ },
291
+ "292": {
292
+ "content": "<extra_id_33>",
293
+ "lstrip": false,
294
+ "normalized": false,
295
+ "rstrip": false,
296
+ "single_word": false,
297
+ "special": true
298
+ },
299
+ "293": {
300
+ "content": "<extra_id_34>",
301
+ "lstrip": false,
302
+ "normalized": false,
303
+ "rstrip": false,
304
+ "single_word": false,
305
+ "special": true
306
+ },
307
+ "294": {
308
+ "content": "<extra_id_35>",
309
+ "lstrip": false,
310
+ "normalized": false,
311
+ "rstrip": false,
312
+ "single_word": false,
313
+ "special": true
314
+ },
315
+ "295": {
316
+ "content": "<extra_id_36>",
317
+ "lstrip": false,
318
+ "normalized": false,
319
+ "rstrip": false,
320
+ "single_word": false,
321
+ "special": true
322
+ },
323
+ "296": {
324
+ "content": "<extra_id_37>",
325
+ "lstrip": false,
326
+ "normalized": false,
327
+ "rstrip": false,
328
+ "single_word": false,
329
+ "special": true
330
+ },
331
+ "297": {
332
+ "content": "<extra_id_38>",
333
+ "lstrip": false,
334
+ "normalized": false,
335
+ "rstrip": false,
336
+ "single_word": false,
337
+ "special": true
338
+ },
339
+ "298": {
340
+ "content": "<extra_id_39>",
341
+ "lstrip": false,
342
+ "normalized": false,
343
+ "rstrip": false,
344
+ "single_word": false,
345
+ "special": true
346
+ },
347
+ "299": {
348
+ "content": "<extra_id_40>",
349
+ "lstrip": false,
350
+ "normalized": false,
351
+ "rstrip": false,
352
+ "single_word": false,
353
+ "special": true
354
+ },
355
+ "300": {
356
+ "content": "<extra_id_41>",
357
+ "lstrip": false,
358
+ "normalized": false,
359
+ "rstrip": false,
360
+ "single_word": false,
361
+ "special": true
362
+ },
363
+ "301": {
364
+ "content": "<extra_id_42>",
365
+ "lstrip": false,
366
+ "normalized": false,
367
+ "rstrip": false,
368
+ "single_word": false,
369
+ "special": true
370
+ },
371
+ "302": {
372
+ "content": "<extra_id_43>",
373
+ "lstrip": false,
374
+ "normalized": false,
375
+ "rstrip": false,
376
+ "single_word": false,
377
+ "special": true
378
+ },
379
+ "303": {
380
+ "content": "<extra_id_44>",
381
+ "lstrip": false,
382
+ "normalized": false,
383
+ "rstrip": false,
384
+ "single_word": false,
385
+ "special": true
386
+ },
387
+ "304": {
388
+ "content": "<extra_id_45>",
389
+ "lstrip": false,
390
+ "normalized": false,
391
+ "rstrip": false,
392
+ "single_word": false,
393
+ "special": true
394
+ },
395
+ "305": {
396
+ "content": "<extra_id_46>",
397
+ "lstrip": false,
398
+ "normalized": false,
399
+ "rstrip": false,
400
+ "single_word": false,
401
+ "special": true
402
+ },
403
+ "306": {
404
+ "content": "<extra_id_47>",
405
+ "lstrip": false,
406
+ "normalized": false,
407
+ "rstrip": false,
408
+ "single_word": false,
409
+ "special": true
410
+ },
411
+ "307": {
412
+ "content": "<extra_id_48>",
413
+ "lstrip": false,
414
+ "normalized": false,
415
+ "rstrip": false,
416
+ "single_word": false,
417
+ "special": true
418
+ },
419
+ "308": {
420
+ "content": "<extra_id_49>",
421
+ "lstrip": false,
422
+ "normalized": false,
423
+ "rstrip": false,
424
+ "single_word": false,
425
+ "special": true
426
+ },
427
+ "309": {
428
+ "content": "<extra_id_50>",
429
+ "lstrip": false,
430
+ "normalized": false,
431
+ "rstrip": false,
432
+ "single_word": false,
433
+ "special": true
434
+ },
435
+ "310": {
436
+ "content": "<extra_id_51>",
437
+ "lstrip": false,
438
+ "normalized": false,
439
+ "rstrip": false,
440
+ "single_word": false,
441
+ "special": true
442
+ },
443
+ "311": {
444
+ "content": "<extra_id_52>",
445
+ "lstrip": false,
446
+ "normalized": false,
447
+ "rstrip": false,
448
+ "single_word": false,
449
+ "special": true
450
+ },
451
+ "312": {
452
+ "content": "<extra_id_53>",
453
+ "lstrip": false,
454
+ "normalized": false,
455
+ "rstrip": false,
456
+ "single_word": false,
457
+ "special": true
458
+ },
459
+ "313": {
460
+ "content": "<extra_id_54>",
461
+ "lstrip": false,
462
+ "normalized": false,
463
+ "rstrip": false,
464
+ "single_word": false,
465
+ "special": true
466
+ },
467
+ "314": {
468
+ "content": "<extra_id_55>",
469
+ "lstrip": false,
470
+ "normalized": false,
471
+ "rstrip": false,
472
+ "single_word": false,
473
+ "special": true
474
+ },
475
+ "315": {
476
+ "content": "<extra_id_56>",
477
+ "lstrip": false,
478
+ "normalized": false,
479
+ "rstrip": false,
480
+ "single_word": false,
481
+ "special": true
482
+ },
483
+ "316": {
484
+ "content": "<extra_id_57>",
485
+ "lstrip": false,
486
+ "normalized": false,
487
+ "rstrip": false,
488
+ "single_word": false,
489
+ "special": true
490
+ },
491
+ "317": {
492
+ "content": "<extra_id_58>",
493
+ "lstrip": false,
494
+ "normalized": false,
495
+ "rstrip": false,
496
+ "single_word": false,
497
+ "special": true
498
+ },
499
+ "318": {
500
+ "content": "<extra_id_59>",
501
+ "lstrip": false,
502
+ "normalized": false,
503
+ "rstrip": false,
504
+ "single_word": false,
505
+ "special": true
506
+ },
507
+ "319": {
508
+ "content": "<extra_id_60>",
509
+ "lstrip": false,
510
+ "normalized": false,
511
+ "rstrip": false,
512
+ "single_word": false,
513
+ "special": true
514
+ },
515
+ "320": {
516
+ "content": "<extra_id_61>",
517
+ "lstrip": false,
518
+ "normalized": false,
519
+ "rstrip": false,
520
+ "single_word": false,
521
+ "special": true
522
+ },
523
+ "321": {
524
+ "content": "<extra_id_62>",
525
+ "lstrip": false,
526
+ "normalized": false,
527
+ "rstrip": false,
528
+ "single_word": false,
529
+ "special": true
530
+ },
531
+ "322": {
532
+ "content": "<extra_id_63>",
533
+ "lstrip": false,
534
+ "normalized": false,
535
+ "rstrip": false,
536
+ "single_word": false,
537
+ "special": true
538
+ },
539
+ "323": {
540
+ "content": "<extra_id_64>",
541
+ "lstrip": false,
542
+ "normalized": false,
543
+ "rstrip": false,
544
+ "single_word": false,
545
+ "special": true
546
+ },
547
+ "324": {
548
+ "content": "<extra_id_65>",
549
+ "lstrip": false,
550
+ "normalized": false,
551
+ "rstrip": false,
552
+ "single_word": false,
553
+ "special": true
554
+ },
555
+ "325": {
556
+ "content": "<extra_id_66>",
557
+ "lstrip": false,
558
+ "normalized": false,
559
+ "rstrip": false,
560
+ "single_word": false,
561
+ "special": true
562
+ },
563
+ "326": {
564
+ "content": "<extra_id_67>",
565
+ "lstrip": false,
566
+ "normalized": false,
567
+ "rstrip": false,
568
+ "single_word": false,
569
+ "special": true
570
+ },
571
+ "327": {
572
+ "content": "<extra_id_68>",
573
+ "lstrip": false,
574
+ "normalized": false,
575
+ "rstrip": false,
576
+ "single_word": false,
577
+ "special": true
578
+ },
579
+ "328": {
580
+ "content": "<extra_id_69>",
581
+ "lstrip": false,
582
+ "normalized": false,
583
+ "rstrip": false,
584
+ "single_word": false,
585
+ "special": true
586
+ },
587
+ "329": {
588
+ "content": "<extra_id_70>",
589
+ "lstrip": false,
590
+ "normalized": false,
591
+ "rstrip": false,
592
+ "single_word": false,
593
+ "special": true
594
+ },
595
+ "330": {
596
+ "content": "<extra_id_71>",
597
+ "lstrip": false,
598
+ "normalized": false,
599
+ "rstrip": false,
600
+ "single_word": false,
601
+ "special": true
602
+ },
603
+ "331": {
604
+ "content": "<extra_id_72>",
605
+ "lstrip": false,
606
+ "normalized": false,
607
+ "rstrip": false,
608
+ "single_word": false,
609
+ "special": true
610
+ },
611
+ "332": {
612
+ "content": "<extra_id_73>",
613
+ "lstrip": false,
614
+ "normalized": false,
615
+ "rstrip": false,
616
+ "single_word": false,
617
+ "special": true
618
+ },
619
+ "333": {
620
+ "content": "<extra_id_74>",
621
+ "lstrip": false,
622
+ "normalized": false,
623
+ "rstrip": false,
624
+ "single_word": false,
625
+ "special": true
626
+ },
627
+ "334": {
628
+ "content": "<extra_id_75>",
629
+ "lstrip": false,
630
+ "normalized": false,
631
+ "rstrip": false,
632
+ "single_word": false,
633
+ "special": true
634
+ },
635
+ "335": {
636
+ "content": "<extra_id_76>",
637
+ "lstrip": false,
638
+ "normalized": false,
639
+ "rstrip": false,
640
+ "single_word": false,
641
+ "special": true
642
+ },
643
+ "336": {
644
+ "content": "<extra_id_77>",
645
+ "lstrip": false,
646
+ "normalized": false,
647
+ "rstrip": false,
648
+ "single_word": false,
649
+ "special": true
650
+ },
651
+ "337": {
652
+ "content": "<extra_id_78>",
653
+ "lstrip": false,
654
+ "normalized": false,
655
+ "rstrip": false,
656
+ "single_word": false,
657
+ "special": true
658
+ },
659
+ "338": {
660
+ "content": "<extra_id_79>",
661
+ "lstrip": false,
662
+ "normalized": false,
663
+ "rstrip": false,
664
+ "single_word": false,
665
+ "special": true
666
+ },
667
+ "339": {
668
+ "content": "<extra_id_80>",
669
+ "lstrip": false,
670
+ "normalized": false,
671
+ "rstrip": false,
672
+ "single_word": false,
673
+ "special": true
674
+ },
675
+ "340": {
676
+ "content": "<extra_id_81>",
677
+ "lstrip": false,
678
+ "normalized": false,
679
+ "rstrip": false,
680
+ "single_word": false,
681
+ "special": true
682
+ },
683
+ "341": {
684
+ "content": "<extra_id_82>",
685
+ "lstrip": false,
686
+ "normalized": false,
687
+ "rstrip": false,
688
+ "single_word": false,
689
+ "special": true
690
+ },
691
+ "342": {
692
+ "content": "<extra_id_83>",
693
+ "lstrip": false,
694
+ "normalized": false,
695
+ "rstrip": false,
696
+ "single_word": false,
697
+ "special": true
698
+ },
699
+ "343": {
700
+ "content": "<extra_id_84>",
701
+ "lstrip": false,
702
+ "normalized": false,
703
+ "rstrip": false,
704
+ "single_word": false,
705
+ "special": true
706
+ },
707
+ "344": {
708
+ "content": "<extra_id_85>",
709
+ "lstrip": false,
710
+ "normalized": false,
711
+ "rstrip": false,
712
+ "single_word": false,
713
+ "special": true
714
+ },
715
+ "345": {
716
+ "content": "<extra_id_86>",
717
+ "lstrip": false,
718
+ "normalized": false,
719
+ "rstrip": false,
720
+ "single_word": false,
721
+ "special": true
722
+ },
723
+ "346": {
724
+ "content": "<extra_id_87>",
725
+ "lstrip": false,
726
+ "normalized": false,
727
+ "rstrip": false,
728
+ "single_word": false,
729
+ "special": true
730
+ },
731
+ "347": {
732
+ "content": "<extra_id_88>",
733
+ "lstrip": false,
734
+ "normalized": false,
735
+ "rstrip": false,
736
+ "single_word": false,
737
+ "special": true
738
+ },
739
+ "348": {
740
+ "content": "<extra_id_89>",
741
+ "lstrip": false,
742
+ "normalized": false,
743
+ "rstrip": false,
744
+ "single_word": false,
745
+ "special": true
746
+ },
747
+ "349": {
748
+ "content": "<extra_id_90>",
749
+ "lstrip": false,
750
+ "normalized": false,
751
+ "rstrip": false,
752
+ "single_word": false,
753
+ "special": true
754
+ },
755
+ "350": {
756
+ "content": "<extra_id_91>",
757
+ "lstrip": false,
758
+ "normalized": false,
759
+ "rstrip": false,
760
+ "single_word": false,
761
+ "special": true
762
+ },
763
+ "351": {
764
+ "content": "<extra_id_92>",
765
+ "lstrip": false,
766
+ "normalized": false,
767
+ "rstrip": false,
768
+ "single_word": false,
769
+ "special": true
770
+ },
771
+ "352": {
772
+ "content": "<extra_id_93>",
773
+ "lstrip": false,
774
+ "normalized": false,
775
+ "rstrip": false,
776
+ "single_word": false,
777
+ "special": true
778
+ },
779
+ "353": {
780
+ "content": "<extra_id_94>",
781
+ "lstrip": false,
782
+ "normalized": false,
783
+ "rstrip": false,
784
+ "single_word": false,
785
+ "special": true
786
+ },
787
+ "354": {
788
+ "content": "<extra_id_95>",
789
+ "lstrip": false,
790
+ "normalized": false,
791
+ "rstrip": false,
792
+ "single_word": false,
793
+ "special": true
794
+ },
795
+ "355": {
796
+ "content": "<extra_id_96>",
797
+ "lstrip": false,
798
+ "normalized": false,
799
+ "rstrip": false,
800
+ "single_word": false,
801
+ "special": true
802
+ },
803
+ "356": {
804
+ "content": "<extra_id_97>",
805
+ "lstrip": false,
806
+ "normalized": false,
807
+ "rstrip": false,
808
+ "single_word": false,
809
+ "special": true
810
+ },
811
+ "357": {
812
+ "content": "<extra_id_98>",
813
+ "lstrip": false,
814
+ "normalized": false,
815
+ "rstrip": false,
816
+ "single_word": false,
817
+ "special": true
818
+ },
819
+ "358": {
820
+ "content": "<extra_id_99>",
821
+ "lstrip": false,
822
+ "normalized": false,
823
+ "rstrip": false,
824
+ "single_word": false,
825
+ "special": true
826
+ },
827
+ "359": {
828
+ "content": "<extra_id_100>",
829
+ "lstrip": false,
830
+ "normalized": false,
831
+ "rstrip": false,
832
+ "single_word": false,
833
+ "special": true
834
+ },
835
+ "360": {
836
+ "content": "<extra_id_101>",
837
+ "lstrip": false,
838
+ "normalized": false,
839
+ "rstrip": false,
840
+ "single_word": false,
841
+ "special": true
842
+ },
843
+ "361": {
844
+ "content": "<extra_id_102>",
845
+ "lstrip": false,
846
+ "normalized": false,
847
+ "rstrip": false,
848
+ "single_word": false,
849
+ "special": true
850
+ },
851
+ "362": {
852
+ "content": "<extra_id_103>",
853
+ "lstrip": false,
854
+ "normalized": false,
855
+ "rstrip": false,
856
+ "single_word": false,
857
+ "special": true
858
+ },
859
+ "363": {
860
+ "content": "<extra_id_104>",
861
+ "lstrip": false,
862
+ "normalized": false,
863
+ "rstrip": false,
864
+ "single_word": false,
865
+ "special": true
866
+ },
867
+ "364": {
868
+ "content": "<extra_id_105>",
869
+ "lstrip": false,
870
+ "normalized": false,
871
+ "rstrip": false,
872
+ "single_word": false,
873
+ "special": true
874
+ },
875
+ "365": {
876
+ "content": "<extra_id_106>",
877
+ "lstrip": false,
878
+ "normalized": false,
879
+ "rstrip": false,
880
+ "single_word": false,
881
+ "special": true
882
+ },
883
+ "366": {
884
+ "content": "<extra_id_107>",
885
+ "lstrip": false,
886
+ "normalized": false,
887
+ "rstrip": false,
888
+ "single_word": false,
889
+ "special": true
890
+ },
891
+ "367": {
892
+ "content": "<extra_id_108>",
893
+ "lstrip": false,
894
+ "normalized": false,
895
+ "rstrip": false,
896
+ "single_word": false,
897
+ "special": true
898
+ },
899
+ "368": {
900
+ "content": "<extra_id_109>",
901
+ "lstrip": false,
902
+ "normalized": false,
903
+ "rstrip": false,
904
+ "single_word": false,
905
+ "special": true
906
+ },
907
+ "369": {
908
+ "content": "<extra_id_110>",
909
+ "lstrip": false,
910
+ "normalized": false,
911
+ "rstrip": false,
912
+ "single_word": false,
913
+ "special": true
914
+ },
915
+ "370": {
916
+ "content": "<extra_id_111>",
917
+ "lstrip": false,
918
+ "normalized": false,
919
+ "rstrip": false,
920
+ "single_word": false,
921
+ "special": true
922
+ },
923
+ "371": {
924
+ "content": "<extra_id_112>",
925
+ "lstrip": false,
926
+ "normalized": false,
927
+ "rstrip": false,
928
+ "single_word": false,
929
+ "special": true
930
+ },
931
+ "372": {
932
+ "content": "<extra_id_113>",
933
+ "lstrip": false,
934
+ "normalized": false,
935
+ "rstrip": false,
936
+ "single_word": false,
937
+ "special": true
938
+ },
939
+ "373": {
940
+ "content": "<extra_id_114>",
941
+ "lstrip": false,
942
+ "normalized": false,
943
+ "rstrip": false,
944
+ "single_word": false,
945
+ "special": true
946
+ },
947
+ "374": {
948
+ "content": "<extra_id_115>",
949
+ "lstrip": false,
950
+ "normalized": false,
951
+ "rstrip": false,
952
+ "single_word": false,
953
+ "special": true
954
+ },
955
+ "375": {
956
+ "content": "<extra_id_116>",
957
+ "lstrip": false,
958
+ "normalized": false,
959
+ "rstrip": false,
960
+ "single_word": false,
961
+ "special": true
962
+ },
963
+ "376": {
964
+ "content": "<extra_id_117>",
965
+ "lstrip": false,
966
+ "normalized": false,
967
+ "rstrip": false,
968
+ "single_word": false,
969
+ "special": true
970
+ },
971
+ "377": {
972
+ "content": "<extra_id_118>",
973
+ "lstrip": false,
974
+ "normalized": false,
975
+ "rstrip": false,
976
+ "single_word": false,
977
+ "special": true
978
+ },
979
+ "378": {
980
+ "content": "<extra_id_119>",
981
+ "lstrip": false,
982
+ "normalized": false,
983
+ "rstrip": false,
984
+ "single_word": false,
985
+ "special": true
986
+ },
987
+ "379": {
988
+ "content": "<extra_id_120>",
989
+ "lstrip": false,
990
+ "normalized": false,
991
+ "rstrip": false,
992
+ "single_word": false,
993
+ "special": true
994
+ },
995
+ "380": {
996
+ "content": "<extra_id_121>",
997
+ "lstrip": false,
998
+ "normalized": false,
999
+ "rstrip": false,
1000
+ "single_word": false,
1001
+ "special": true
1002
+ },
1003
+ "381": {
1004
+ "content": "<extra_id_122>",
1005
+ "lstrip": false,
1006
+ "normalized": false,
1007
+ "rstrip": false,
1008
+ "single_word": false,
1009
+ "special": true
1010
+ },
1011
+ "382": {
1012
+ "content": "<extra_id_123>",
1013
+ "lstrip": false,
1014
+ "normalized": false,
1015
+ "rstrip": false,
1016
+ "single_word": false,
1017
+ "special": true
1018
+ },
1019
+ "383": {
1020
+ "content": "<extra_id_124>",
1021
+ "lstrip": false,
1022
+ "normalized": false,
1023
+ "rstrip": false,
1024
+ "single_word": false,
1025
+ "special": true
1026
+ }
1027
+ },
1028
+ "additional_special_tokens": [
1029
+ "<extra_id_0>",
1030
+ "<extra_id_1>",
1031
+ "<extra_id_2>",
1032
+ "<extra_id_3>",
1033
+ "<extra_id_4>",
1034
+ "<extra_id_5>",
1035
+ "<extra_id_6>",
1036
+ "<extra_id_7>",
1037
+ "<extra_id_8>",
1038
+ "<extra_id_9>",
1039
+ "<extra_id_10>",
1040
+ "<extra_id_11>",
1041
+ "<extra_id_12>",
1042
+ "<extra_id_13>",
1043
+ "<extra_id_14>",
1044
+ "<extra_id_15>",
1045
+ "<extra_id_16>",
1046
+ "<extra_id_17>",
1047
+ "<extra_id_18>",
1048
+ "<extra_id_19>",
1049
+ "<extra_id_20>",
1050
+ "<extra_id_21>",
1051
+ "<extra_id_22>",
1052
+ "<extra_id_23>",
1053
+ "<extra_id_24>",
1054
+ "<extra_id_25>",
1055
+ "<extra_id_26>",
1056
+ "<extra_id_27>",
1057
+ "<extra_id_28>",
1058
+ "<extra_id_29>",
1059
+ "<extra_id_30>",
1060
+ "<extra_id_31>",
1061
+ "<extra_id_32>",
1062
+ "<extra_id_33>",
1063
+ "<extra_id_34>",
1064
+ "<extra_id_35>",
1065
+ "<extra_id_36>",
1066
+ "<extra_id_37>",
1067
+ "<extra_id_38>",
1068
+ "<extra_id_39>",
1069
+ "<extra_id_40>",
1070
+ "<extra_id_41>",
1071
+ "<extra_id_42>",
1072
+ "<extra_id_43>",
1073
+ "<extra_id_44>",
1074
+ "<extra_id_45>",
1075
+ "<extra_id_46>",
1076
+ "<extra_id_47>",
1077
+ "<extra_id_48>",
1078
+ "<extra_id_49>",
1079
+ "<extra_id_50>",
1080
+ "<extra_id_51>",
1081
+ "<extra_id_52>",
1082
+ "<extra_id_53>",
1083
+ "<extra_id_54>",
1084
+ "<extra_id_55>",
1085
+ "<extra_id_56>",
1086
+ "<extra_id_57>",
1087
+ "<extra_id_58>",
1088
+ "<extra_id_59>",
1089
+ "<extra_id_60>",
1090
+ "<extra_id_61>",
1091
+ "<extra_id_62>",
1092
+ "<extra_id_63>",
1093
+ "<extra_id_64>",
1094
+ "<extra_id_65>",
1095
+ "<extra_id_66>",
1096
+ "<extra_id_67>",
1097
+ "<extra_id_68>",
1098
+ "<extra_id_69>",
1099
+ "<extra_id_70>",
1100
+ "<extra_id_71>",
1101
+ "<extra_id_72>",
1102
+ "<extra_id_73>",
1103
+ "<extra_id_74>",
1104
+ "<extra_id_75>",
1105
+ "<extra_id_76>",
1106
+ "<extra_id_77>",
1107
+ "<extra_id_78>",
1108
+ "<extra_id_79>",
1109
+ "<extra_id_80>",
1110
+ "<extra_id_81>",
1111
+ "<extra_id_82>",
1112
+ "<extra_id_83>",
1113
+ "<extra_id_84>",
1114
+ "<extra_id_85>",
1115
+ "<extra_id_86>",
1116
+ "<extra_id_87>",
1117
+ "<extra_id_88>",
1118
+ "<extra_id_89>",
1119
+ "<extra_id_90>",
1120
+ "<extra_id_91>",
1121
+ "<extra_id_92>",
1122
+ "<extra_id_93>",
1123
+ "<extra_id_94>",
1124
+ "<extra_id_95>",
1125
+ "<extra_id_96>",
1126
+ "<extra_id_97>",
1127
+ "<extra_id_98>",
1128
+ "<extra_id_99>",
1129
+ "<extra_id_100>",
1130
+ "<extra_id_101>",
1131
+ "<extra_id_102>",
1132
+ "<extra_id_103>",
1133
+ "<extra_id_104>",
1134
+ "<extra_id_105>",
1135
+ "<extra_id_106>",
1136
+ "<extra_id_107>",
1137
+ "<extra_id_108>",
1138
+ "<extra_id_109>",
1139
+ "<extra_id_110>",
1140
+ "<extra_id_111>",
1141
+ "<extra_id_112>",
1142
+ "<extra_id_113>",
1143
+ "<extra_id_114>",
1144
+ "<extra_id_115>",
1145
+ "<extra_id_116>",
1146
+ "<extra_id_117>",
1147
+ "<extra_id_118>",
1148
+ "<extra_id_119>",
1149
+ "<extra_id_120>",
1150
+ "<extra_id_121>",
1151
+ "<extra_id_122>",
1152
+ "<extra_id_123>",
1153
+ "<extra_id_124>"
1154
+ ],
1155
+ "clean_up_tokenization_spaces": false,
1156
+ "eos_token": "</s>",
1157
+ "extra_ids": 0,
1158
+ "extra_special_tokens": {},
1159
+ "model_max_length": 1000000000000000019884624838656,
1160
+ "pad_token": "<pad>",
1161
+ "tokenizer_class": "MyT5Tokenizer",
1162
+ "unk_token": "<unk>"
1163
+ }
train_stable_myt5-large.py ADDED
@@ -0,0 +1,347 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import gc
4
+ import logging
5
+ import torch
6
+ import pickle
7
+ from torch.utils.data import Dataset
8
+ from transformers import (
9
+ AutoModelForSeq2SeqLM,
10
+ AutoTokenizer,
11
+ TrainingArguments,
12
+ Trainer,
13
+ TrainerCallback,
14
+ DataCollatorForSeq2Seq,
15
+ )
16
+
17
# CONFIGURATION
MAX_ITEMS = None  # optional cap on how many dataset items to use; None = full dataset
MAX_LENGTH = 256  # max token length for both source and target encodings
PER_DEVICE_BATCH = 1
GRAD_ACC_STEPS = 16  # effective batch = PER_DEVICE_BATCH * 16; increased due to higher MAX_LENGTH
LEARNING_RATE = 5e-5
NUM_TRAIN_EPOCHS = 1
WARMUP_STEPS = 200
FP16_TRAINING = False  # disabled as a Windows workaround (original note: "fix windows")
OPTIMIZER_CHOICE = "adamw_8bit"  # requires bitsandbytes; availability is checked in Train()
MAX_GRAD_NORM_CLIP = 0.0  # forwarded to TrainingArguments.max_grad_norm; 0.0 presumably disables clipping — TODO confirm for the installed transformers version
GRADIENT_CHECKPOINTING = True  # trade extra compute for lower activation memory
LOGGING_STEPS = 50
SAVE_STEPS = 1000
EVAL_STEPS = 500
SAVE_TOTAL_LIMIT = 20  # each checkpoint is ~7GB on disk
FIXED_PROMPT_FOR_GENERATION = "Create stable diffusion metadata based on the given english description. a futuristic city"

# Module-wide logger with a timestamped format; every function below logs through `log`.
logging.basicConfig(level=logging.INFO, format="%(asctime)s — %(levelname)s — %(name)s — %(message)s")
log = logging.getLogger(__name__)
37
+
38
class SDPromptDataset(Dataset):
    """Pre-tokenized instruction->output dataset with on-disk pickle caching.

    Tokenizes every (instruction, output) pair up front, padding/truncating
    both sides to `max_length`, and caches the resulting tensors so repeated
    runs skip tokenization.  NOTE(review): the cache key is only
    (dataset_type, item count, max_length) — it does not hash the data
    itself, so changing the JSON contents without changing its length will
    silently reuse a stale cache.
    """

    def __init__(self, raw_data_list, tokenizer, max_length, dataset_type="train", cache_dir="cache"):
        # raw_data_list: list of dicts; items are read via .get("instruction") / .get("output").
        self.raw_data = raw_data_list
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.dataset_type = dataset_type  # "train" or "eval"

        os.makedirs(cache_dir, exist_ok=True)
        cache_file = os.path.join(cache_dir, f"{dataset_type}_{len(raw_data_list)}_{max_length}.pkl")

        if os.path.exists(cache_file):
            # Fast path: reuse previously tokenized tensors from the pickle cache.
            log.info(f"Loading cached {dataset_type} dataset from {cache_file}")
            with open(cache_file, 'rb') as f:
                self.examples = pickle.load(f)
            log.info(f"Loaded {len(self.examples)} cached examples for {dataset_type}")
        else:
            log.info(f"Tokenizing {len(raw_data_list)} samples for {dataset_type} with {type(tokenizer).__name__}...")
            self.examples = []

            for i, item in enumerate(raw_data_list):
                # Progress logging every 5000 items and on the final item.
                if i > 0 and (i % 5000 == 0 or i == len(raw_data_list) - 1):
                    log.info(f"Tokenized {i+1} / {len(raw_data_list)} samples for {dataset_type}")

                instruction = item.get("instruction", "")
                output = item.get("output", "")

                # Inputs are always padded to the fixed max_length.
                input_encoding = tokenizer(
                    instruction, max_length=max_length, padding="max_length",
                    truncation=True, return_tensors="pt",
                )

                # Targets are tokenized for training always, and for eval only
                # when an output string is actually present.
                if self.dataset_type == "train" or (self.dataset_type == "eval" and output):
                    target_encoding = tokenizer(
                        output, max_length=max_length, padding="max_length",
                        truncation=True, return_tensors="pt",
                    )
                    labels = target_encoding["input_ids"].squeeze()
                    # -100 masks padding positions out of the loss computation.
                    labels[labels == tokenizer.pad_token_id] = -100
                else:
                    labels = None

                example_data = {
                    "input_ids": input_encoding["input_ids"].squeeze(),
                    "attention_mask": input_encoding["attention_mask"].squeeze(),
                }
                if labels is not None:
                    example_data["labels"] = labels

                self.examples.append(example_data)

            # Persist the tokenized tensors for the next run.
            log.info(f"Tokenization complete for {dataset_type}. Saving cache to {cache_file}")
            with open(cache_file, 'wb') as f:
                pickle.dump(self.examples, f)
            log.info(f"Cache saved successfully")

    def __len__(self):
        # Number of tokenized examples (cached or freshly built).
        return len(self.examples)

    def __getitem__(self, idx):
        # Returns the pre-tokenized tensor dict for example `idx`.
        return self.examples[idx]

    def get_raw_example(self, idx):
        # Returns the untokenized source dict for example `idx`.
        return self.raw_data[idx]
101
+
102
def load_and_split_json_data(data_path, max_items_from_config=None):
    """Load a JSON list of samples and split it into train/validation parts.

    Uses a 90/10 split (50/50 for very small datasets), guarantees a
    non-empty validation set whenever any data exists, and caps validation
    at 2000 samples.  Returns (train_list, val_list_or_None); raises
    FileNotFoundError / ValueError on missing or empty data.
    """
    log.info(f"Loading data from {data_path}...")
    if not os.path.exists(data_path):
        log.error(f"Data file not found: {data_path}")
        raise FileNotFoundError(f"Data file not found: {data_path}")

    with open(data_path, "r", encoding="utf-8") as handle:
        records = json.load(handle)
    log.info(f"Successfully loaded {len(records)} total items from JSON.")

    # Optionally keep only the first N items, as configured by MAX_ITEMS.
    if max_items_from_config is not None and max_items_from_config > 0:
        keep = min(max_items_from_config, len(records))
        log.info(f"Keeping the first {keep} samples as per MAX_ITEMS config.")
        records = records[:keep]
    else:
        log.info("Using the full dataset.")

    if not records:
        log.error("No data loaded or remaining.")
        raise ValueError("No data to process.")

    # Choose the split point: 50% for tiny datasets, otherwise 90%.
    if len(records) < 20:
        cut = max(1, int(0.5 * len(records)))
        log.warning(f"Dataset very small ({len(records)} items). Adjusting split.")
    else:
        cut = max(1, int(0.9 * len(records)))

    train_data = records[:cut]
    val_data = records[cut:]

    # Never return an empty validation set while training data exists:
    # borrow the last training sample (and drop it from train when possible).
    if not val_data and train_data:
        val_data = [train_data[-1]]
        log.warning("Validation set was empty after split, using one sample from training data for validation.")
        if len(train_data) > 1:
            train_data = train_data[:-1]

    # Cap validation size; None signals "no validation data at all".
    val_data = val_data[:min(len(val_data), 2000)] if val_data else None

    if not train_data:
        log.error("Training data empty.")
        raise ValueError("Training data empty.")

    log.info(f"Train samples: {len(train_data)}, Validation samples: {len(val_data) if val_data else 0}")
    return train_data, val_data
147
+
148
def find_latest_checkpoint(output_dir):
    """Return the path of the newest usable checkpoint in `output_dir`, or None.

    Scans `checkpoint-<step>` subdirectories in descending step order and
    returns the first one that actually contains model weights
    (pytorch_model.bin or model.safetensors).  Unlike the previous
    "newest-only" check, this falls back to older checkpoints when the most
    recent save was interrupted and left no weight file behind.

    Args:
        output_dir: directory that Trainer writes checkpoints into.

    Returns:
        Absolute-or-relative path string of the best candidate, or None when
        `output_dir` is missing or holds no checkpoint with weights.
    """
    if not os.path.isdir(output_dir):
        return None

    checkpoints = [d for d in os.listdir(output_dir) if d.startswith("checkpoint-") and os.path.isdir(os.path.join(output_dir, d))]
    if not checkpoints:
        return None

    # Newest first; directory names encode the global step as their suffix.
    checkpoints.sort(key=lambda x: int(x.split('-')[-1]), reverse=True)
    for name in checkpoints:
        candidate = os.path.join(output_dir, name)
        # Accept a checkpoint only if it actually contains model weights.
        if os.path.exists(os.path.join(candidate, "pytorch_model.bin")) or os.path.exists(os.path.join(candidate, "model.safetensors")):
            return candidate

    return None
163
+
164
def clear_cuda_cache():
    """Free memory before/after heavy model work: Python GC first, then the CUDA allocator cache."""
    log.info("Clearing CUDA cache...")
    gc.collect()
    if not torch.cuda.is_available():
        return
    torch.cuda.empty_cache()
169
+
170
def generate_and_log_fixed_sample(model, tokenizer, prompt_text, device, log_prefix="Sample"):
    """Run generation for a single fixed prompt and log both prompt and output.

    Args:
        model: seq2seq model whose inputs live on `device`.
        tokenizer: tokenizer matching the model.
        prompt_text: input text to condition generation on.
        device: torch device to move input tensors to.
        log_prefix: label used in the surrounding log lines.

    Side effect: switches the model to eval mode and leaves it there; callers
    that continue training must restore model.train() afterwards (as
    ShowFixedEvalSampleCallback does).
    """
    log.info(f"\n--- {log_prefix} Generation ---")
    log.info(f"Input Prompt: {prompt_text}")
    model.eval()
    # Truncate the prompt to the same maximum length used during training.
    inputs = tokenizer(prompt_text, return_tensors="pt", max_length=MAX_LENGTH, truncation=True)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model.generate(
            **inputs, max_length=MAX_LENGTH + 50,
            num_beams=5, early_stopping=True, no_repeat_ngram_size=3,
            # NOTE(review): temperature/top_k/top_p are sampling parameters;
            # with beam search and no do_sample=True they are presumably
            # ignored by transformers — confirm the intended decoding strategy.
            temperature=0.7, top_k=50, top_p=0.95
        )
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    log.info(f"Generated Output: {generated_text}")
    log.info(f"--- End {log_prefix} Generation ---\n")
185
+
186
class ShowFixedEvalSampleCallback(TrainerCallback):
    """Trainer callback that logs a generation for one fixed prompt after every evaluation."""

    def __init__(self, tokenizer, prompt_text):
        self.tokenizer = tokenizer
        self.prompt_text = prompt_text

    def on_evaluate(self, args, state, control, model=None, **kwargs):
        # Nothing to sample from if the Trainer did not hand us the model.
        if model is None:
            return
        target_device = next(model.parameters()).device
        generate_and_log_fixed_sample(
            model, self.tokenizer, self.prompt_text, target_device,
            log_prefix="Evaluation Callback Sample",
        )
        # The helper leaves the model in eval mode; put it back into training mode.
        model.train()
197
+
198
def Train(model_id: str, output_dir: str, data_path: str):
    """Fine-tune a seq2seq model (myT5) on instruction->output pairs from a JSON file.

    Args:
        model_id: HF hub id or local path of the base model/tokenizer.
        output_dir: directory for checkpoints and the final saved model.
        data_path: path to a JSON list of {"instruction", "output"} dicts.

    Side effects: creates `output_dir`, writes checkpoints there, and resumes
    automatically from the newest valid checkpoint if one exists.  Returns
    None; failures are logged rather than raised (early-return on error).
    """
    os.makedirs(output_dir, exist_ok=True)
    clear_cuda_cache()

    # Check for existing checkpoint to resume
    resume_from_checkpoint = find_latest_checkpoint(output_dir)
    if resume_from_checkpoint:
        log.info(f"Found checkpoint to resume from: {resume_from_checkpoint}")
    else:
        log.info("No existing checkpoint found, starting fresh training")

    # MyT5 ships a custom tokenizer class, hence trust_remote_code=True.
    log.info(f"Attempting to load MyT5Tokenizer for {model_id} (trust_remote_code=True).")
    try:
        tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
        log.info(f"Successfully loaded tokenizer: {type(tokenizer).__name__}")
    except Exception as e:
        log.error(f"Failed to load tokenizer for {model_id} (trust_remote_code=True): {e}")
        return

    train_raw_data, eval_raw_data = load_and_split_json_data(data_path, max_items_from_config=MAX_ITEMS)
    if not train_raw_data:
        return

    # Datasets tokenize (and cache) everything up front.
    train_dataset = SDPromptDataset(train_raw_data, tokenizer, MAX_LENGTH, dataset_type="train")
    eval_dataset = SDPromptDataset(eval_raw_data, tokenizer, MAX_LENGTH, dataset_type="eval") if eval_raw_data else None

    log.info(f"Loading model: {model_id}")
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_id,
        torch_dtype=torch.float16 if FP16_TRAINING else torch.float32,
        device_map="auto",
        low_cpu_mem_usage=True,
    )

    if GRADIENT_CHECKPOINTING:
        model.gradient_checkpointing_enable()
        log.info("Grad-ckpt enabled.")

    # The 8-bit optimizer needs bitsandbytes; fail fast with a clear message.
    if OPTIMIZER_CHOICE == "adamw_8bit":
        try:
            import bitsandbytes
            log.info(f"bitsandbytes version: {bitsandbytes.__version__} imported for adamw_8bit.")
        except ImportError:
            log.error("bitsandbytes not installed, required for optim='adamw_8bit'. Install: pip install bitsandbytes")
            return

    training_args = TrainingArguments(
        output_dir=output_dir,
        per_device_train_batch_size=PER_DEVICE_BATCH,
        per_device_eval_batch_size=PER_DEVICE_BATCH * 2,
        gradient_accumulation_steps=GRAD_ACC_STEPS,
        learning_rate=LEARNING_RATE,
        num_train_epochs=NUM_TRAIN_EPOCHS,
        warmup_steps=WARMUP_STEPS,
        logging_steps=LOGGING_STEPS,
        save_strategy="steps",
        save_steps=SAVE_STEPS,
        # Evaluation only makes sense when a validation split exists.
        eval_strategy="steps" if eval_dataset else "no",
        eval_steps=EVAL_STEPS if eval_dataset else None,
        save_total_limit=SAVE_TOTAL_LIMIT,
        load_best_model_at_end=True if eval_dataset else False,
        fp16=FP16_TRAINING,
        optim=OPTIMIZER_CHOICE,
        # NOTE(review): MAX_GRAD_NORM_CLIP is 0.0, which presumably disables
        # gradient clipping — confirm this is intentional.
        max_grad_norm=MAX_GRAD_NORM_CLIP,
        gradient_checkpointing=GRADIENT_CHECKPOINTING,
        group_by_length=True,
        lr_scheduler_type="cosine",
        weight_decay=0.01,
        report_to="none",
    )

    # Only log sample generations when evaluations actually run.
    fixed_sample_callback = ShowFixedEvalSampleCallback(tokenizer=tokenizer, prompt_text=FIXED_PROMPT_FOR_GENERATION)
    callbacks_to_use = [fixed_sample_callback] if eval_dataset else []

    # NOTE(review): examples are already padded to max_length by the dataset,
    # so padding="longest" here is mostly a no-op — confirm if dynamic padding
    # was intended.
    data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model, padding="longest")
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=data_collator,
        tokenizer=tokenizer,
        callbacks=callbacks_to_use
    )

    log.info(f"Starting training with FP16_TRAINING={FP16_TRAINING}, optim='{OPTIMIZER_CHOICE}', LR={LEARNING_RATE}, GradClip={MAX_GRAD_NORM_CLIP}...")
    try:
        trainer.train(resume_from_checkpoint=resume_from_checkpoint)
    except Exception as e:
        log.exception(f"Unhandled error during trainer.train(): {e}")
        return

    log.info("Training completed.")
    try:
        # Save an explicit final copy unless one already exists from a prior run.
        final_model_path = os.path.join(output_dir, "final_model_after_train")
        if not os.path.exists(final_model_path):
            trainer.save_model(final_model_path)
            log.info(f"Final model state explicitly saved to {final_model_path}")
        else:
            log.info(f"Best model was likely saved by load_best_model_at_end to a checkpoint within {output_dir}")
    except Exception as e:
        log.exception(f"Error saving final explicit model: {e}")
    log.info("Train function finished.")
301
+
302
def Inference(base_model_id_for_tokenizer: str, trained_model_output_dir: str):
    """Load the fine-tuned model and run a single demo generation.

    Model resolution order: `final_model_after_train` subdirectory with
    weights -> newest checkpoint with weights -> `trained_model_output_dir`
    itself (must contain weights).  The tokenizer is loaded from the same
    path, falling back to `base_model_id_for_tokenizer`.  Errors are logged
    and cause an early return rather than an exception.
    """
    log.info(f"\n--- Starting Inference ---")

    path_to_load_model_from = trained_model_output_dir
    potential_final_model = os.path.join(trained_model_output_dir, "final_model_after_train")

    # Prefer the explicitly saved final model if it has weight files.
    if os.path.exists(potential_final_model) and (os.path.exists(os.path.join(potential_final_model, "pytorch_model.bin")) or os.path.exists(os.path.join(potential_final_model, "model.safetensors"))):
        path_to_load_model_from = potential_final_model
        log.info(f"Found 'final_model_after_train' at: {path_to_load_model_from}")
    else:
        # Otherwise fall back to the newest checkpoint, then the output dir itself.
        latest_checkpoint = find_latest_checkpoint(trained_model_output_dir)
        if latest_checkpoint:
            path_to_load_model_from = latest_checkpoint
            log.info(f"Found latest checkpoint: {path_to_load_model_from}")
        elif not (os.path.exists(os.path.join(path_to_load_model_from, "pytorch_model.bin")) or os.path.exists(os.path.join(path_to_load_model_from, "model.safetensors"))):
            log.error(f"No valid model found in {trained_model_output_dir} or its subdirectories. Cannot run inference.")
            return

    log.info(f"Attempting to load fine-tuned model from: {path_to_load_model_from}")

    try:
        model = AutoModelForSeq2SeqLM.from_pretrained(path_to_load_model_from, device_map="auto")
        # Checkpoints may not include the custom tokenizer files; fall back to base.
        try:
            tokenizer = AutoTokenizer.from_pretrained(path_to_load_model_from, trust_remote_code=True)
        except Exception:
            log.warning(f"Could not load tokenizer from {path_to_load_model_from}, trying base {base_model_id_for_tokenizer}")
            tokenizer = AutoTokenizer.from_pretrained(base_model_id_for_tokenizer, trust_remote_code=True)
        log.info(f"Successfully loaded model and tokenizer for inference. Model is on: {model.device}")
    except Exception as e:
        log.error(f"Failed to load model or tokenizer for inference: {e}")
        return

    device = next(model.parameters()).device
    generate_and_log_fixed_sample(model, tokenizer, FIXED_PROMPT_FOR_GENERATION, device, log_prefix="Final Inference")
    log.info(f"--- Inference Demo Finished ---")
337
+
338
def main():
    """Entry point: fine-tune myT5-large on the prompt dataset, then run a demo inference."""
    Train('Tomlim/myt5-large', 'trained_model', 'DiscordPromptSD.json')
    Inference('Tomlim/myt5-large', 'trained_model')

if __name__ == "__main__":
    main()
344
+
345
+ # - SFW Cyberpunk City: `Nikon Z9 200mm f_8 ISO 160, (giant rifle structure), flawless ornate architecture, cyberpunk, neon lights, busy street, realistic, ray tracing, hasselblad`
346
+ # - **SFW Fantasy Dragon Rider: `masterpiece, best quality, cinematic lighting, 1girl, solo, <lora:add_detail:0.55>`
347
+ # - **NSFW Anime Succubus: `masterpiece, best quality, highly detailed background, intricate, 1girl, (full-face blush, aroused:1.3), long hair, medium breasts, nipples`
trainer_state.json ADDED
@@ -0,0 +1,3434 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_global_step": 24500,
3
+ "best_metric": 0.5816783905029297,
4
+ "best_model_checkpoint": "trained_model\\checkpoint-19000",
5
+ "epoch": 0.7204104898971434,
6
+ "eval_steps": 500,
7
+ "global_step": 25000,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.0014408209797942867,
14
+ "learning_rate": 1.225e-05,
15
+ "loss": 4.8426,
16
+ "step": 50
17
+ },
18
+ {
19
+ "epoch": 0.0028816419595885734,
20
+ "learning_rate": 2.4750000000000002e-05,
21
+ "loss": 1.8657,
22
+ "step": 100
23
+ },
24
+ {
25
+ "epoch": 0.004322462939382861,
26
+ "learning_rate": 3.7250000000000004e-05,
27
+ "loss": 1.6201,
28
+ "step": 150
29
+ },
30
+ {
31
+ "epoch": 0.005763283919177147,
32
+ "learning_rate": 4.975e-05,
33
+ "loss": 1.4427,
34
+ "step": 200
35
+ },
36
+ {
37
+ "epoch": 0.007204104898971434,
38
+ "learning_rate": 4.999975117874187e-05,
39
+ "loss": 1.3279,
40
+ "step": 250
41
+ },
42
+ {
43
+ "epoch": 0.008644925878765721,
44
+ "learning_rate": 4.99989843045885e-05,
45
+ "loss": 1.2472,
46
+ "step": 300
47
+ },
48
+ {
49
+ "epoch": 0.010085746858560007,
50
+ "learning_rate": 4.9997699289801957e-05,
51
+ "loss": 1.2063,
52
+ "step": 350
53
+ },
54
+ {
55
+ "epoch": 0.011526567838354294,
56
+ "learning_rate": 4.999589616101606e-05,
57
+ "loss": 1.1545,
58
+ "step": 400
59
+ },
60
+ {
61
+ "epoch": 0.012967388818148582,
62
+ "learning_rate": 4.9993574955603304e-05,
63
+ "loss": 1.1197,
64
+ "step": 450
65
+ },
66
+ {
67
+ "epoch": 0.014408209797942868,
68
+ "learning_rate": 4.9990735721674075e-05,
69
+ "loss": 1.0864,
70
+ "step": 500
71
+ },
72
+ {
73
+ "epoch": 0.014408209797942868,
74
+ "eval_loss": 1.0425041913986206,
75
+ "eval_runtime": 80.2488,
76
+ "eval_samples_per_second": 24.922,
77
+ "eval_steps_per_second": 12.461,
78
+ "step": 500
79
+ },
80
+ {
81
+ "epoch": 0.015849030777737156,
82
+ "learning_rate": 4.9987378518075656e-05,
83
+ "loss": 1.0661,
84
+ "step": 550
85
+ },
86
+ {
87
+ "epoch": 0.017289851757531442,
88
+ "learning_rate": 4.998350341439103e-05,
89
+ "loss": 1.0467,
90
+ "step": 600
91
+ },
92
+ {
93
+ "epoch": 0.01873067273732573,
94
+ "learning_rate": 4.997911049093741e-05,
95
+ "loss": 1.0132,
96
+ "step": 650
97
+ },
98
+ {
99
+ "epoch": 0.020171493717120015,
100
+ "learning_rate": 4.9974199838764554e-05,
101
+ "loss": 1.0067,
102
+ "step": 700
103
+ },
104
+ {
105
+ "epoch": 0.0216123146969143,
106
+ "learning_rate": 4.9968771559652967e-05,
107
+ "loss": 1.0036,
108
+ "step": 750
109
+ },
110
+ {
111
+ "epoch": 0.023053135676708587,
112
+ "learning_rate": 4.996282576611168e-05,
113
+ "loss": 0.9847,
114
+ "step": 800
115
+ },
116
+ {
117
+ "epoch": 0.024493956656502874,
118
+ "learning_rate": 4.9956362581375996e-05,
119
+ "loss": 0.975,
120
+ "step": 850
121
+ },
122
+ {
123
+ "epoch": 0.025934777636297163,
124
+ "learning_rate": 4.994938213940489e-05,
125
+ "loss": 0.9665,
126
+ "step": 900
127
+ },
128
+ {
129
+ "epoch": 0.02737559861609145,
130
+ "learning_rate": 4.994188458487828e-05,
131
+ "loss": 0.9548,
132
+ "step": 950
133
+ },
134
+ {
135
+ "epoch": 0.028816419595885736,
136
+ "learning_rate": 4.9933870073193985e-05,
137
+ "loss": 0.9419,
138
+ "step": 1000
139
+ },
140
+ {
141
+ "epoch": 0.028816419595885736,
142
+ "eval_loss": 0.8812388777732849,
143
+ "eval_runtime": 80.8889,
144
+ "eval_samples_per_second": 24.725,
145
+ "eval_steps_per_second": 12.363,
146
+ "step": 1000
147
+ },
148
+ {
149
+ "epoch": 0.030257240575680022,
150
+ "learning_rate": 4.992533877046451e-05,
151
+ "loss": 0.9482,
152
+ "step": 1050
153
+ },
154
+ {
155
+ "epoch": 0.03169806155547431,
156
+ "learning_rate": 4.991629085351363e-05,
157
+ "loss": 0.9323,
158
+ "step": 1100
159
+ },
160
+ {
161
+ "epoch": 0.033138882535268595,
162
+ "learning_rate": 4.99067265098727e-05,
163
+ "loss": 0.9204,
164
+ "step": 1150
165
+ },
166
+ {
167
+ "epoch": 0.034579703515062885,
168
+ "learning_rate": 4.9896645937776775e-05,
169
+ "loss": 0.9359,
170
+ "step": 1200
171
+ },
172
+ {
173
+ "epoch": 0.03602052449485717,
174
+ "learning_rate": 4.98860493461605e-05,
175
+ "loss": 0.9161,
176
+ "step": 1250
177
+ },
178
+ {
179
+ "epoch": 0.03746134547465146,
180
+ "learning_rate": 4.98749369546538e-05,
181
+ "loss": 0.9078,
182
+ "step": 1300
183
+ },
184
+ {
185
+ "epoch": 0.03890216645444574,
186
+ "learning_rate": 4.986330899357729e-05,
187
+ "loss": 0.8987,
188
+ "step": 1350
189
+ },
190
+ {
191
+ "epoch": 0.04034298743424003,
192
+ "learning_rate": 4.985116570393751e-05,
193
+ "loss": 0.8908,
194
+ "step": 1400
195
+ },
196
+ {
197
+ "epoch": 0.04178380841403432,
198
+ "learning_rate": 4.983850733742197e-05,
199
+ "loss": 0.8959,
200
+ "step": 1450
201
+ },
202
+ {
203
+ "epoch": 0.0432246293938286,
204
+ "learning_rate": 4.9825334156393885e-05,
205
+ "loss": 0.8728,
206
+ "step": 1500
207
+ },
208
+ {
209
+ "epoch": 0.0432246293938286,
210
+ "eval_loss": 0.8146671652793884,
211
+ "eval_runtime": 79.7347,
212
+ "eval_samples_per_second": 25.083,
213
+ "eval_steps_per_second": 12.542,
214
+ "step": 1500
215
+ },
216
+ {
217
+ "epoch": 0.04466545037362289,
218
+ "learning_rate": 4.9811646433886774e-05,
219
+ "loss": 0.8628,
220
+ "step": 1550
221
+ },
222
+ {
223
+ "epoch": 0.046106271353417175,
224
+ "learning_rate": 4.979744445359876e-05,
225
+ "loss": 0.8627,
226
+ "step": 1600
227
+ },
228
+ {
229
+ "epoch": 0.047547092333211464,
230
+ "learning_rate": 4.978272850988673e-05,
231
+ "loss": 0.8474,
232
+ "step": 1650
233
+ },
234
+ {
235
+ "epoch": 0.04898791331300575,
236
+ "learning_rate": 4.976749890776021e-05,
237
+ "loss": 0.8706,
238
+ "step": 1700
239
+ },
240
+ {
241
+ "epoch": 0.05042873429280004,
242
+ "learning_rate": 4.9751755962875054e-05,
243
+ "loss": 0.8522,
244
+ "step": 1750
245
+ },
246
+ {
247
+ "epoch": 0.05186955527259433,
248
+ "learning_rate": 4.9735500001526894e-05,
249
+ "loss": 0.8411,
250
+ "step": 1800
251
+ },
252
+ {
253
+ "epoch": 0.05331037625238861,
254
+ "learning_rate": 4.971873136064438e-05,
255
+ "loss": 0.8373,
256
+ "step": 1850
257
+ },
258
+ {
259
+ "epoch": 0.0547511972321829,
260
+ "learning_rate": 4.970145038778221e-05,
261
+ "loss": 0.8577,
262
+ "step": 1900
263
+ },
264
+ {
265
+ "epoch": 0.05619201821197718,
266
+ "learning_rate": 4.9683657441113884e-05,
267
+ "loss": 0.8387,
268
+ "step": 1950
269
+ },
270
+ {
271
+ "epoch": 0.05763283919177147,
272
+ "learning_rate": 4.9665352889424354e-05,
273
+ "loss": 0.8216,
274
+ "step": 2000
275
+ },
276
+ {
277
+ "epoch": 0.05763283919177147,
278
+ "eval_loss": 0.7666909098625183,
279
+ "eval_runtime": 79.8485,
280
+ "eval_samples_per_second": 25.047,
281
+ "eval_steps_per_second": 12.524,
282
+ "step": 2000
283
+ },
284
+ {
285
+ "epoch": 0.059073660171565755,
286
+ "learning_rate": 4.964653711210231e-05,
287
+ "loss": 0.8395,
288
+ "step": 2050
289
+ },
290
+ {
291
+ "epoch": 0.060514481151360044,
292
+ "learning_rate": 4.962721049913233e-05,
293
+ "loss": 0.8323,
294
+ "step": 2100
295
+ },
296
+ {
297
+ "epoch": 0.061955302131154334,
298
+ "learning_rate": 4.960737345108685e-05,
299
+ "loss": 0.8028,
300
+ "step": 2150
301
+ },
302
+ {
303
+ "epoch": 0.06339612311094862,
304
+ "learning_rate": 4.958702637911779e-05,
305
+ "loss": 0.818,
306
+ "step": 2200
307
+ },
308
+ {
309
+ "epoch": 0.0648369440907429,
310
+ "learning_rate": 4.9566169704948065e-05,
311
+ "loss": 0.8121,
312
+ "step": 2250
313
+ },
314
+ {
315
+ "epoch": 0.06627776507053719,
316
+ "learning_rate": 4.9544803860862856e-05,
317
+ "loss": 0.8276,
318
+ "step": 2300
319
+ },
320
+ {
321
+ "epoch": 0.06771858605033147,
322
+ "learning_rate": 4.952292928970065e-05,
323
+ "loss": 0.8066,
324
+ "step": 2350
325
+ },
326
+ {
327
+ "epoch": 0.06915940703012577,
328
+ "learning_rate": 4.950054644484401e-05,
329
+ "loss": 0.8043,
330
+ "step": 2400
331
+ },
332
+ {
333
+ "epoch": 0.07060022800992005,
334
+ "learning_rate": 4.9477655790210256e-05,
335
+ "loss": 0.7998,
336
+ "step": 2450
337
+ },
338
+ {
339
+ "epoch": 0.07204104898971433,
340
+ "learning_rate": 4.945425780024179e-05,
341
+ "loss": 0.8015,
342
+ "step": 2500
343
+ },
344
+ {
345
+ "epoch": 0.07204104898971433,
346
+ "eval_loss": 0.7467618584632874,
347
+ "eval_runtime": 82.6828,
348
+ "eval_samples_per_second": 24.189,
349
+ "eval_steps_per_second": 12.094,
350
+ "step": 2500
351
+ },
352
+ {
353
+ "epoch": 0.07348186996950863,
354
+ "learning_rate": 4.943035295989629e-05,
355
+ "loss": 0.8048,
356
+ "step": 2550
357
+ },
358
+ {
359
+ "epoch": 0.07492269094930291,
360
+ "learning_rate": 4.940594176463665e-05,
361
+ "loss": 0.7857,
362
+ "step": 2600
363
+ },
364
+ {
365
+ "epoch": 0.0763635119290972,
366
+ "learning_rate": 4.938102472042071e-05,
367
+ "loss": 0.8022,
368
+ "step": 2650
369
+ },
370
+ {
371
+ "epoch": 0.07780433290889148,
372
+ "learning_rate": 4.935560234369078e-05,
373
+ "loss": 0.7792,
374
+ "step": 2700
375
+ },
376
+ {
377
+ "epoch": 0.07924515388868578,
378
+ "learning_rate": 4.932967516136291e-05,
379
+ "loss": 0.7967,
380
+ "step": 2750
381
+ },
382
+ {
383
+ "epoch": 0.08068597486848006,
384
+ "learning_rate": 4.9303243710816006e-05,
385
+ "loss": 0.7749,
386
+ "step": 2800
387
+ },
388
+ {
389
+ "epoch": 0.08212679584827434,
390
+ "learning_rate": 4.927630853988068e-05,
391
+ "loss": 0.7777,
392
+ "step": 2850
393
+ },
394
+ {
395
+ "epoch": 0.08356761682806864,
396
+ "learning_rate": 4.924887020682785e-05,
397
+ "loss": 0.7793,
398
+ "step": 2900
399
+ },
400
+ {
401
+ "epoch": 0.08500843780786292,
402
+ "learning_rate": 4.922092928035725e-05,
403
+ "loss": 0.7788,
404
+ "step": 2950
405
+ },
406
+ {
407
+ "epoch": 0.0864492587876572,
408
+ "learning_rate": 4.919248633958557e-05,
409
+ "loss": 0.7706,
410
+ "step": 3000
411
+ },
412
+ {
413
+ "epoch": 0.0864492587876572,
414
+ "eval_loss": 0.7307117581367493,
415
+ "eval_runtime": 91.6423,
416
+ "eval_samples_per_second": 21.824,
417
+ "eval_steps_per_second": 10.912,
418
+ "step": 3000
419
+ },
420
+ {
421
+ "epoch": 0.08789007976745149,
422
+ "learning_rate": 4.91635419740345e-05,
423
+ "loss": 0.7728,
424
+ "step": 3050
425
+ },
426
+ {
427
+ "epoch": 0.08933090074724578,
428
+ "learning_rate": 4.913409678361849e-05,
429
+ "loss": 0.7687,
430
+ "step": 3100
431
+ },
432
+ {
433
+ "epoch": 0.09077172172704007,
434
+ "learning_rate": 4.910415137863232e-05,
435
+ "loss": 0.7868,
436
+ "step": 3150
437
+ },
438
+ {
439
+ "epoch": 0.09221254270683435,
440
+ "learning_rate": 4.907370637973845e-05,
441
+ "loss": 0.7774,
442
+ "step": 3200
443
+ },
444
+ {
445
+ "epoch": 0.09365336368662865,
446
+ "learning_rate": 4.9042762417954144e-05,
447
+ "loss": 0.754,
448
+ "step": 3250
449
+ },
450
+ {
451
+ "epoch": 0.09509418466642293,
452
+ "learning_rate": 4.901132013463844e-05,
453
+ "loss": 0.7621,
454
+ "step": 3300
455
+ },
456
+ {
457
+ "epoch": 0.09653500564621721,
458
+ "learning_rate": 4.897938018147878e-05,
459
+ "loss": 0.7558,
460
+ "step": 3350
461
+ },
462
+ {
463
+ "epoch": 0.0979758266260115,
464
+ "learning_rate": 4.894694322047757e-05,
465
+ "loss": 0.748,
466
+ "step": 3400
467
+ },
468
+ {
469
+ "epoch": 0.09941664760580579,
470
+ "learning_rate": 4.891400992393842e-05,
471
+ "loss": 0.759,
472
+ "step": 3450
473
+ },
474
+ {
475
+ "epoch": 0.10085746858560007,
476
+ "learning_rate": 4.888058097445223e-05,
477
+ "loss": 0.7614,
478
+ "step": 3500
479
+ },
480
+ {
481
+ "epoch": 0.10085746858560007,
482
+ "eval_loss": 0.7035399079322815,
483
+ "eval_runtime": 81.6585,
484
+ "eval_samples_per_second": 24.492,
485
+ "eval_steps_per_second": 12.246,
486
+ "step": 3500
487
+ },
488
+ {
489
+ "epoch": 0.10229828956539436,
490
+ "learning_rate": 4.8846657064883025e-05,
491
+ "loss": 0.7653,
492
+ "step": 3550
493
+ },
494
+ {
495
+ "epoch": 0.10373911054518865,
496
+ "learning_rate": 4.881223889835363e-05,
497
+ "loss": 0.7561,
498
+ "step": 3600
499
+ },
500
+ {
501
+ "epoch": 0.10517993152498294,
502
+ "learning_rate": 4.877732718823103e-05,
503
+ "loss": 0.739,
504
+ "step": 3650
505
+ },
506
+ {
507
+ "epoch": 0.10662075250477722,
508
+ "learning_rate": 4.8741922658111656e-05,
509
+ "loss": 0.7314,
510
+ "step": 3700
511
+ },
512
+ {
513
+ "epoch": 0.1080615734845715,
514
+ "learning_rate": 4.870602604180634e-05,
515
+ "loss": 0.7459,
516
+ "step": 3750
517
+ },
518
+ {
519
+ "epoch": 0.1095023944643658,
520
+ "learning_rate": 4.866963808332515e-05,
521
+ "loss": 0.7445,
522
+ "step": 3800
523
+ },
524
+ {
525
+ "epoch": 0.11094321544416008,
526
+ "learning_rate": 4.86327595368619e-05,
527
+ "loss": 0.7233,
528
+ "step": 3850
529
+ },
530
+ {
531
+ "epoch": 0.11238403642395436,
532
+ "learning_rate": 4.859539116677858e-05,
533
+ "loss": 0.743,
534
+ "step": 3900
535
+ },
536
+ {
537
+ "epoch": 0.11382485740374866,
538
+ "learning_rate": 4.855753374758951e-05,
539
+ "loss": 0.7251,
540
+ "step": 3950
541
+ },
542
+ {
543
+ "epoch": 0.11526567838354294,
544
+ "learning_rate": 4.851918806394525e-05,
545
+ "loss": 0.7273,
546
+ "step": 4000
547
+ },
548
+ {
549
+ "epoch": 0.11526567838354294,
550
+ "eval_loss": 0.6973708271980286,
551
+ "eval_runtime": 85.9653,
552
+ "eval_samples_per_second": 23.265,
553
+ "eval_steps_per_second": 11.633,
554
+ "step": 4000
555
+ },
556
+ {
557
+ "epoch": 0.11670649936333723,
558
+ "learning_rate": 4.8480354910616355e-05,
559
+ "loss": 0.7324,
560
+ "step": 4050
561
+ },
562
+ {
563
+ "epoch": 0.11814732034313151,
564
+ "learning_rate": 4.844103509247692e-05,
565
+ "loss": 0.7307,
566
+ "step": 4100
567
+ },
568
+ {
569
+ "epoch": 0.1195881413229258,
570
+ "learning_rate": 4.840122942448786e-05,
571
+ "loss": 0.7299,
572
+ "step": 4150
573
+ },
574
+ {
575
+ "epoch": 0.12102896230272009,
576
+ "learning_rate": 4.836093873168006e-05,
577
+ "loss": 0.7336,
578
+ "step": 4200
579
+ },
580
+ {
581
+ "epoch": 0.12246978328251437,
582
+ "learning_rate": 4.832016384913726e-05,
583
+ "loss": 0.7377,
584
+ "step": 4250
585
+ },
586
+ {
587
+ "epoch": 0.12391060426230867,
588
+ "learning_rate": 4.827890562197873e-05,
589
+ "loss": 0.7233,
590
+ "step": 4300
591
+ },
592
+ {
593
+ "epoch": 0.12535142524210294,
594
+ "learning_rate": 4.823716490534177e-05,
595
+ "loss": 0.7234,
596
+ "step": 4350
597
+ },
598
+ {
599
+ "epoch": 0.12679224622189725,
600
+ "learning_rate": 4.8194942564363975e-05,
601
+ "loss": 0.7171,
602
+ "step": 4400
603
+ },
604
+ {
605
+ "epoch": 0.12823306720169153,
606
+ "learning_rate": 4.815223947416533e-05,
607
+ "loss": 0.7242,
608
+ "step": 4450
609
+ },
610
+ {
611
+ "epoch": 0.1296738881814858,
612
+ "learning_rate": 4.810905651983004e-05,
613
+ "loss": 0.7214,
614
+ "step": 4500
615
+ },
616
+ {
617
+ "epoch": 0.1296738881814858,
618
+ "eval_loss": 0.6867671012878418,
619
+ "eval_runtime": 97.3752,
620
+ "eval_samples_per_second": 20.539,
621
+ "eval_steps_per_second": 10.27,
622
+ "step": 4500
623
+ },
624
+ {
625
+ "epoch": 0.1311147091612801,
626
+ "learning_rate": 4.8065394596388186e-05,
627
+ "loss": 0.7045,
628
+ "step": 4550
629
+ },
630
+ {
631
+ "epoch": 0.13255553014107438,
632
+ "learning_rate": 4.8021254608797214e-05,
633
+ "loss": 0.7144,
634
+ "step": 4600
635
+ },
636
+ {
637
+ "epoch": 0.13399635112086866,
638
+ "learning_rate": 4.7976637471923145e-05,
639
+ "loss": 0.7194,
640
+ "step": 4650
641
+ },
642
+ {
643
+ "epoch": 0.13543717210066294,
644
+ "learning_rate": 4.7931544110521584e-05,
645
+ "loss": 0.7149,
646
+ "step": 4700
647
+ },
648
+ {
649
+ "epoch": 0.13687799308045726,
650
+ "learning_rate": 4.788597545921865e-05,
651
+ "loss": 0.7198,
652
+ "step": 4750
653
+ },
654
+ {
655
+ "epoch": 0.13831881406025154,
656
+ "learning_rate": 4.7839932462491485e-05,
657
+ "loss": 0.6961,
658
+ "step": 4800
659
+ },
660
+ {
661
+ "epoch": 0.13975963504004582,
662
+ "learning_rate": 4.779341607464877e-05,
663
+ "loss": 0.716,
664
+ "step": 4850
665
+ },
666
+ {
667
+ "epoch": 0.1412004560198401,
668
+ "learning_rate": 4.774642725981089e-05,
669
+ "loss": 0.7102,
670
+ "step": 4900
671
+ },
672
+ {
673
+ "epoch": 0.1426412769996344,
674
+ "learning_rate": 4.769896699189001e-05,
675
+ "loss": 0.702,
676
+ "step": 4950
677
+ },
678
+ {
679
+ "epoch": 0.14408209797942867,
680
+ "learning_rate": 4.765103625456979e-05,
681
+ "loss": 0.7044,
682
+ "step": 5000
683
+ },
684
+ {
685
+ "epoch": 0.14408209797942867,
686
+ "eval_loss": 0.6795601844787598,
687
+ "eval_runtime": 83.068,
688
+ "eval_samples_per_second": 24.077,
689
+ "eval_steps_per_second": 12.038,
690
+ "step": 5000
691
+ },
692
+ {
693
+ "epoch": 0.14552291895922295,
694
+ "learning_rate": 4.760263604128511e-05,
695
+ "loss": 0.6948,
696
+ "step": 5050
697
+ },
698
+ {
699
+ "epoch": 0.14696373993901726,
700
+ "learning_rate": 4.755376735520143e-05,
701
+ "loss": 0.7073,
702
+ "step": 5100
703
+ },
704
+ {
705
+ "epoch": 0.14840456091881155,
706
+ "learning_rate": 4.750443120919395e-05,
707
+ "loss": 0.6887,
708
+ "step": 5150
709
+ },
710
+ {
711
+ "epoch": 0.14984538189860583,
712
+ "learning_rate": 4.7454628625826716e-05,
713
+ "loss": 0.6787,
714
+ "step": 5200
715
+ },
716
+ {
717
+ "epoch": 0.1512862028784001,
718
+ "learning_rate": 4.740436063733133e-05,
719
+ "loss": 0.6967,
720
+ "step": 5250
721
+ },
722
+ {
723
+ "epoch": 0.1527270238581944,
724
+ "learning_rate": 4.7353628285585625e-05,
725
+ "loss": 0.7072,
726
+ "step": 5300
727
+ },
728
+ {
729
+ "epoch": 0.15416784483798868,
730
+ "learning_rate": 4.730243262209204e-05,
731
+ "loss": 0.6821,
732
+ "step": 5350
733
+ },
734
+ {
735
+ "epoch": 0.15560866581778296,
736
+ "learning_rate": 4.7250774707955816e-05,
737
+ "loss": 0.6674,
738
+ "step": 5400
739
+ },
740
+ {
741
+ "epoch": 0.15704948679757727,
742
+ "learning_rate": 4.719865561386303e-05,
743
+ "loss": 0.6979,
744
+ "step": 5450
745
+ },
746
+ {
747
+ "epoch": 0.15849030777737155,
748
+ "learning_rate": 4.714607642005839e-05,
749
+ "loss": 0.688,
750
+ "step": 5500
751
+ },
752
+ {
753
+ "epoch": 0.15849030777737155,
754
+ "eval_loss": 0.6694391369819641,
755
+ "eval_runtime": 82.0167,
756
+ "eval_samples_per_second": 24.385,
757
+ "eval_steps_per_second": 12.193,
758
+ "step": 5500
759
+ },
760
+ {
761
+ "epoch": 0.15993112875716584,
762
+ "learning_rate": 4.709303821632284e-05,
763
+ "loss": 0.7001,
764
+ "step": 5550
765
+ },
766
+ {
767
+ "epoch": 0.16137194973696012,
768
+ "learning_rate": 4.703954210195097e-05,
769
+ "loss": 0.6761,
770
+ "step": 5600
771
+ },
772
+ {
773
+ "epoch": 0.1628127707167544,
774
+ "learning_rate": 4.6985589185728273e-05,
775
+ "loss": 0.6769,
776
+ "step": 5650
777
+ },
778
+ {
779
+ "epoch": 0.16425359169654868,
780
+ "learning_rate": 4.693118058590809e-05,
781
+ "loss": 0.6892,
782
+ "step": 5700
783
+ },
784
+ {
785
+ "epoch": 0.16569441267634297,
786
+ "learning_rate": 4.68763174301885e-05,
787
+ "loss": 0.6897,
788
+ "step": 5750
789
+ },
790
+ {
791
+ "epoch": 0.16713523365613728,
792
+ "learning_rate": 4.6821000855688903e-05,
793
+ "loss": 0.67,
794
+ "step": 5800
795
+ },
796
+ {
797
+ "epoch": 0.16857605463593156,
798
+ "learning_rate": 4.6765232008926485e-05,
799
+ "loss": 0.6681,
800
+ "step": 5850
801
+ },
802
+ {
803
+ "epoch": 0.17001687561572584,
804
+ "learning_rate": 4.6709012045792426e-05,
805
+ "loss": 0.6885,
806
+ "step": 5900
807
+ },
808
+ {
809
+ "epoch": 0.17145769659552013,
810
+ "learning_rate": 4.665234213152796e-05,
811
+ "loss": 0.6764,
812
+ "step": 5950
813
+ },
814
+ {
815
+ "epoch": 0.1728985175753144,
816
+ "learning_rate": 4.659522344070022e-05,
817
+ "loss": 0.675,
818
+ "step": 6000
819
+ },
820
+ {
821
+ "epoch": 0.1728985175753144,
822
+ "eval_loss": 0.6623409390449524,
823
+ "eval_runtime": 77.9602,
824
+ "eval_samples_per_second": 25.654,
825
+ "eval_steps_per_second": 12.827,
826
+ "step": 6000
827
+ },
828
+ {
829
+ "epoch": 0.1743393385551087,
830
+ "learning_rate": 4.653765715717788e-05,
831
+ "loss": 0.6609,
832
+ "step": 6050
833
+ },
834
+ {
835
+ "epoch": 0.17578015953490297,
836
+ "learning_rate": 4.6479644474106657e-05,
837
+ "loss": 0.6632,
838
+ "step": 6100
839
+ },
840
+ {
841
+ "epoch": 0.17722098051469728,
842
+ "learning_rate": 4.642118659388452e-05,
843
+ "loss": 0.6764,
844
+ "step": 6150
845
+ },
846
+ {
847
+ "epoch": 0.17866180149449157,
848
+ "learning_rate": 4.636228472813682e-05,
849
+ "loss": 0.6659,
850
+ "step": 6200
851
+ },
852
+ {
853
+ "epoch": 0.18010262247428585,
854
+ "learning_rate": 4.630294009769115e-05,
855
+ "loss": 0.6946,
856
+ "step": 6250
857
+ },
858
+ {
859
+ "epoch": 0.18154344345408013,
860
+ "learning_rate": 4.624315393255207e-05,
861
+ "loss": 0.6616,
862
+ "step": 6300
863
+ },
864
+ {
865
+ "epoch": 0.18298426443387442,
866
+ "learning_rate": 4.618292747187559e-05,
867
+ "loss": 0.6614,
868
+ "step": 6350
869
+ },
870
+ {
871
+ "epoch": 0.1844250854136687,
872
+ "learning_rate": 4.612226196394347e-05,
873
+ "loss": 0.6712,
874
+ "step": 6400
875
+ },
876
+ {
877
+ "epoch": 0.18586590639346298,
878
+ "learning_rate": 4.6061158666137376e-05,
879
+ "loss": 0.6708,
880
+ "step": 6450
881
+ },
882
+ {
883
+ "epoch": 0.1873067273732573,
884
+ "learning_rate": 4.599961884491283e-05,
885
+ "loss": 0.649,
886
+ "step": 6500
887
+ },
888
+ {
889
+ "epoch": 0.1873067273732573,
890
+ "eval_loss": 0.6525030732154846,
891
+ "eval_runtime": 79.56,
892
+ "eval_samples_per_second": 25.138,
893
+ "eval_steps_per_second": 12.569,
894
+ "step": 6500
895
+ },
896
+ {
897
+ "epoch": 0.18874754835305158,
898
+ "learning_rate": 4.593764377577293e-05,
899
+ "loss": 0.6663,
900
+ "step": 6550
901
+ },
902
+ {
903
+ "epoch": 0.19018836933284586,
904
+ "learning_rate": 4.587523474324193e-05,
905
+ "loss": 0.654,
906
+ "step": 6600
907
+ },
908
+ {
909
+ "epoch": 0.19162919031264014,
910
+ "learning_rate": 4.58123930408386e-05,
911
+ "loss": 0.646,
912
+ "step": 6650
913
+ },
914
+ {
915
+ "epoch": 0.19307001129243442,
916
+ "learning_rate": 4.5749119971049424e-05,
917
+ "loss": 0.6426,
918
+ "step": 6700
919
+ },
920
+ {
921
+ "epoch": 0.1945108322722287,
922
+ "learning_rate": 4.5685416845301644e-05,
923
+ "loss": 0.6521,
924
+ "step": 6750
925
+ },
926
+ {
927
+ "epoch": 0.195951653252023,
928
+ "learning_rate": 4.562128498393601e-05,
929
+ "loss": 0.6577,
930
+ "step": 6800
931
+ },
932
+ {
933
+ "epoch": 0.1973924742318173,
934
+ "learning_rate": 4.5556725716179454e-05,
935
+ "loss": 0.6592,
936
+ "step": 6850
937
+ },
938
+ {
939
+ "epoch": 0.19883329521161158,
940
+ "learning_rate": 4.5491740380117534e-05,
941
+ "loss": 0.6621,
942
+ "step": 6900
943
+ },
944
+ {
945
+ "epoch": 0.20027411619140587,
946
+ "learning_rate": 4.5426330322666696e-05,
947
+ "loss": 0.6401,
948
+ "step": 6950
949
+ },
950
+ {
951
+ "epoch": 0.20171493717120015,
952
+ "learning_rate": 4.536049689954637e-05,
953
+ "loss": 0.651,
954
+ "step": 7000
955
+ },
956
+ {
957
+ "epoch": 0.20171493717120015,
958
+ "eval_loss": 0.6546201109886169,
959
+ "eval_runtime": 78.0665,
960
+ "eval_samples_per_second": 25.619,
961
+ "eval_steps_per_second": 12.81,
962
+ "step": 7000
963
+ },
964
+ {
965
+ "epoch": 0.20315575815099443,
966
+ "learning_rate": 4.529424147525086e-05,
967
+ "loss": 0.6629,
968
+ "step": 7050
969
+ },
970
+ {
971
+ "epoch": 0.2045965791307887,
972
+ "learning_rate": 4.522756542302103e-05,
973
+ "loss": 0.6654,
974
+ "step": 7100
975
+ },
976
+ {
977
+ "epoch": 0.206037400110583,
978
+ "learning_rate": 4.516047012481594e-05,
979
+ "loss": 0.6455,
980
+ "step": 7150
981
+ },
982
+ {
983
+ "epoch": 0.2074782210903773,
984
+ "learning_rate": 4.509295697128407e-05,
985
+ "loss": 0.6417,
986
+ "step": 7200
987
+ },
988
+ {
989
+ "epoch": 0.2089190420701716,
990
+ "learning_rate": 4.502502736173462e-05,
991
+ "loss": 0.6423,
992
+ "step": 7250
993
+ },
994
+ {
995
+ "epoch": 0.21035986304996587,
996
+ "learning_rate": 4.495668270410841e-05,
997
+ "loss": 0.654,
998
+ "step": 7300
999
+ },
1000
+ {
1001
+ "epoch": 0.21180068402976016,
1002
+ "learning_rate": 4.488792441494876e-05,
1003
+ "loss": 0.6385,
1004
+ "step": 7350
1005
+ },
1006
+ {
1007
+ "epoch": 0.21324150500955444,
1008
+ "learning_rate": 4.481875391937211e-05,
1009
+ "loss": 0.6462,
1010
+ "step": 7400
1011
+ },
1012
+ {
1013
+ "epoch": 0.21468232598934872,
1014
+ "learning_rate": 4.4749172651038493e-05,
1015
+ "loss": 0.6564,
1016
+ "step": 7450
1017
+ },
1018
+ {
1019
+ "epoch": 0.216123146969143,
1020
+ "learning_rate": 4.4679182052121794e-05,
1021
+ "loss": 0.6293,
1022
+ "step": 7500
1023
+ },
1024
+ {
1025
+ "epoch": 0.216123146969143,
1026
+ "eval_loss": 0.6418629884719849,
1027
+ "eval_runtime": 79.3564,
1028
+ "eval_samples_per_second": 25.203,
1029
+ "eval_steps_per_second": 12.601,
1030
+ "step": 7500
1031
+ },
1032
+ {
1033
+ "epoch": 0.21756396794893731,
1034
+ "learning_rate": 4.4608783573279867e-05,
1035
+ "loss": 0.6527,
1036
+ "step": 7550
1037
+ },
1038
+ {
1039
+ "epoch": 0.2190047889287316,
1040
+ "learning_rate": 4.453797867362449e-05,
1041
+ "loss": 0.6462,
1042
+ "step": 7600
1043
+ },
1044
+ {
1045
+ "epoch": 0.22044560990852588,
1046
+ "learning_rate": 4.446676882069112e-05,
1047
+ "loss": 0.633,
1048
+ "step": 7650
1049
+ },
1050
+ {
1051
+ "epoch": 0.22188643088832016,
1052
+ "learning_rate": 4.4395155490408456e-05,
1053
+ "loss": 0.6243,
1054
+ "step": 7700
1055
+ },
1056
+ {
1057
+ "epoch": 0.22332725186811445,
1058
+ "learning_rate": 4.4323140167067835e-05,
1059
+ "loss": 0.6415,
1060
+ "step": 7750
1061
+ },
1062
+ {
1063
+ "epoch": 0.22476807284790873,
1064
+ "learning_rate": 4.425072434329252e-05,
1065
+ "loss": 0.6413,
1066
+ "step": 7800
1067
+ },
1068
+ {
1069
+ "epoch": 0.226208893827703,
1070
+ "learning_rate": 4.4177909520006714e-05,
1071
+ "loss": 0.6213,
1072
+ "step": 7850
1073
+ },
1074
+ {
1075
+ "epoch": 0.22764971480749732,
1076
+ "learning_rate": 4.4104697206404484e-05,
1077
+ "loss": 0.6647,
1078
+ "step": 7900
1079
+ },
1080
+ {
1081
+ "epoch": 0.2290905357872916,
1082
+ "learning_rate": 4.403108891991846e-05,
1083
+ "loss": 0.6275,
1084
+ "step": 7950
1085
+ },
1086
+ {
1087
+ "epoch": 0.2305313567670859,
1088
+ "learning_rate": 4.395708618618839e-05,
1089
+ "loss": 0.6367,
1090
+ "step": 8000
1091
+ },
1092
+ {
1093
+ "epoch": 0.2305313567670859,
1094
+ "eval_loss": 0.6377586722373962,
1095
+ "eval_runtime": 79.238,
1096
+ "eval_samples_per_second": 25.24,
1097
+ "eval_steps_per_second": 12.62,
1098
+ "step": 8000
1099
+ },
1100
+ {
1101
+ "epoch": 0.23197217774688017,
1102
+ "learning_rate": 4.3882690539029526e-05,
1103
+ "loss": 0.6302,
1104
+ "step": 8050
1105
+ },
1106
+ {
1107
+ "epoch": 0.23341299872667445,
1108
+ "learning_rate": 4.380790352040082e-05,
1109
+ "loss": 0.6295,
1110
+ "step": 8100
1111
+ },
1112
+ {
1113
+ "epoch": 0.23485381970646874,
1114
+ "learning_rate": 4.373272668037298e-05,
1115
+ "loss": 0.6278,
1116
+ "step": 8150
1117
+ },
1118
+ {
1119
+ "epoch": 0.23629464068626302,
1120
+ "learning_rate": 4.365716157709632e-05,
1121
+ "loss": 0.6136,
1122
+ "step": 8200
1123
+ },
1124
+ {
1125
+ "epoch": 0.23773546166605733,
1126
+ "learning_rate": 4.35812097767685e-05,
1127
+ "loss": 0.6312,
1128
+ "step": 8250
1129
+ },
1130
+ {
1131
+ "epoch": 0.2391762826458516,
1132
+ "learning_rate": 4.350487285360203e-05,
1133
+ "loss": 0.6261,
1134
+ "step": 8300
1135
+ },
1136
+ {
1137
+ "epoch": 0.2406171036256459,
1138
+ "learning_rate": 4.3428152389791654e-05,
1139
+ "loss": 0.6348,
1140
+ "step": 8350
1141
+ },
1142
+ {
1143
+ "epoch": 0.24205792460544018,
1144
+ "learning_rate": 4.335104997548157e-05,
1145
+ "loss": 0.625,
1146
+ "step": 8400
1147
+ },
1148
+ {
1149
+ "epoch": 0.24349874558523446,
1150
+ "learning_rate": 4.3273567208732454e-05,
1151
+ "loss": 0.6418,
1152
+ "step": 8450
1153
+ },
1154
+ {
1155
+ "epoch": 0.24493956656502874,
1156
+ "learning_rate": 4.319570569548834e-05,
1157
+ "loss": 0.6282,
1158
+ "step": 8500
1159
+ },
1160
+ {
1161
+ "epoch": 0.24493956656502874,
1162
+ "eval_loss": 0.6374307870864868,
1163
+ "eval_runtime": 81.6998,
1164
+ "eval_samples_per_second": 24.48,
1165
+ "eval_steps_per_second": 12.24,
1166
+ "step": 8500
1167
+ },
1168
+ {
1169
+ "epoch": 0.24638038754482305,
1170
+ "learning_rate": 4.3117467049543355e-05,
1171
+ "loss": 0.6284,
1172
+ "step": 8550
1173
+ },
1174
+ {
1175
+ "epoch": 0.24782120852461734,
1176
+ "learning_rate": 4.303885289250823e-05,
1177
+ "loss": 0.6069,
1178
+ "step": 8600
1179
+ },
1180
+ {
1181
+ "epoch": 0.24926202950441162,
1182
+ "learning_rate": 4.2959864853776736e-05,
1183
+ "loss": 0.6072,
1184
+ "step": 8650
1185
+ },
1186
+ {
1187
+ "epoch": 0.2507028504842059,
1188
+ "learning_rate": 4.288050457049188e-05,
1189
+ "loss": 0.6032,
1190
+ "step": 8700
1191
+ },
1192
+ {
1193
+ "epoch": 0.2521436714640002,
1194
+ "learning_rate": 4.280077368751198e-05,
1195
+ "loss": 0.6178,
1196
+ "step": 8750
1197
+ },
1198
+ {
1199
+ "epoch": 0.2535844924437945,
1200
+ "learning_rate": 4.27206738573766e-05,
1201
+ "loss": 0.6217,
1202
+ "step": 8800
1203
+ },
1204
+ {
1205
+ "epoch": 0.25502531342358875,
1206
+ "learning_rate": 4.264020674027224e-05,
1207
+ "loss": 0.6204,
1208
+ "step": 8850
1209
+ },
1210
+ {
1211
+ "epoch": 0.25646613440338306,
1212
+ "learning_rate": 4.255937400399799e-05,
1213
+ "loss": 0.617,
1214
+ "step": 8900
1215
+ },
1216
+ {
1217
+ "epoch": 0.2579069553831773,
1218
+ "learning_rate": 4.247817732393091e-05,
1219
+ "loss": 0.6161,
1220
+ "step": 8950
1221
+ },
1222
+ {
1223
+ "epoch": 0.2593477763629716,
1224
+ "learning_rate": 4.239661838299136e-05,
1225
+ "loss": 0.6009,
1226
+ "step": 9000
1227
+ },
1228
+ {
1229
+ "epoch": 0.2593477763629716,
1230
+ "eval_loss": 0.6277636885643005,
1231
+ "eval_runtime": 80.2755,
1232
+ "eval_samples_per_second": 24.914,
1233
+ "eval_steps_per_second": 12.457,
1234
+ "step": 9000
1235
+ },
1236
+ {
1237
+ "epoch": 0.2607885973427659,
1238
+ "learning_rate": 4.231469887160806e-05,
1239
+ "loss": 0.618,
1240
+ "step": 9050
1241
+ },
1242
+ {
1243
+ "epoch": 0.2622294183225602,
1244
+ "learning_rate": 4.22324204876831e-05,
1245
+ "loss": 0.6253,
1246
+ "step": 9100
1247
+ },
1248
+ {
1249
+ "epoch": 0.2636702393023545,
1250
+ "learning_rate": 4.214978493655671e-05,
1251
+ "loss": 0.6022,
1252
+ "step": 9150
1253
+ },
1254
+ {
1255
+ "epoch": 0.26511106028214876,
1256
+ "learning_rate": 4.206679393097196e-05,
1257
+ "loss": 0.6196,
1258
+ "step": 9200
1259
+ },
1260
+ {
1261
+ "epoch": 0.26655188126194307,
1262
+ "learning_rate": 4.19834491910392e-05,
1263
+ "loss": 0.6031,
1264
+ "step": 9250
1265
+ },
1266
+ {
1267
+ "epoch": 0.2679927022417373,
1268
+ "learning_rate": 4.189975244420049e-05,
1269
+ "loss": 0.6251,
1270
+ "step": 9300
1271
+ },
1272
+ {
1273
+ "epoch": 0.26943352322153163,
1274
+ "learning_rate": 4.18157054251937e-05,
1275
+ "loss": 0.6217,
1276
+ "step": 9350
1277
+ },
1278
+ {
1279
+ "epoch": 0.2708743442013259,
1280
+ "learning_rate": 4.1731309876016656e-05,
1281
+ "loss": 0.6172,
1282
+ "step": 9400
1283
+ },
1284
+ {
1285
+ "epoch": 0.2723151651811202,
1286
+ "learning_rate": 4.1646567545890934e-05,
1287
+ "loss": 0.6074,
1288
+ "step": 9450
1289
+ },
1290
+ {
1291
+ "epoch": 0.2737559861609145,
1292
+ "learning_rate": 4.1561480191225694e-05,
1293
+ "loss": 0.6087,
1294
+ "step": 9500
1295
+ },
1296
+ {
1297
+ "epoch": 0.2737559861609145,
1298
+ "eval_loss": 0.6340173482894897,
1299
+ "eval_runtime": 77.5417,
1300
+ "eval_samples_per_second": 25.793,
1301
+ "eval_steps_per_second": 12.896,
1302
+ "step": 9500
1303
+ },
1304
+ {
1305
+ "epoch": 0.27519680714070877,
1306
+ "learning_rate": 4.147604957558121e-05,
1307
+ "loss": 0.6242,
1308
+ "step": 9550
1309
+ },
1310
+ {
1311
+ "epoch": 0.2766376281205031,
1312
+ "learning_rate": 4.139027746963235e-05,
1313
+ "loss": 0.603,
1314
+ "step": 9600
1315
+ },
1316
+ {
1317
+ "epoch": 0.27807844910029733,
1318
+ "learning_rate": 4.130416565113188e-05,
1319
+ "loss": 0.6047,
1320
+ "step": 9650
1321
+ },
1322
+ {
1323
+ "epoch": 0.27951927008009164,
1324
+ "learning_rate": 4.121771590487358e-05,
1325
+ "loss": 0.6049,
1326
+ "step": 9700
1327
+ },
1328
+ {
1329
+ "epoch": 0.2809600910598859,
1330
+ "learning_rate": 4.113093002265531e-05,
1331
+ "loss": 0.5943,
1332
+ "step": 9750
1333
+ },
1334
+ {
1335
+ "epoch": 0.2824009120396802,
1336
+ "learning_rate": 4.104380980324184e-05,
1337
+ "loss": 0.6229,
1338
+ "step": 9800
1339
+ },
1340
+ {
1341
+ "epoch": 0.2838417330194745,
1342
+ "learning_rate": 4.0956357052327536e-05,
1343
+ "loss": 0.586,
1344
+ "step": 9850
1345
+ },
1346
+ {
1347
+ "epoch": 0.2852825539992688,
1348
+ "learning_rate": 4.0868573582499004e-05,
1349
+ "loss": 0.6058,
1350
+ "step": 9900
1351
+ },
1352
+ {
1353
+ "epoch": 0.2867233749790631,
1354
+ "learning_rate": 4.078046121319746e-05,
1355
+ "loss": 0.6081,
1356
+ "step": 9950
1357
+ },
1358
+ {
1359
+ "epoch": 0.28816419595885734,
1360
+ "learning_rate": 4.0692021770681066e-05,
1361
+ "loss": 0.5998,
1362
+ "step": 10000
1363
+ },
1364
+ {
1365
+ "epoch": 0.28816419595885734,
1366
+ "eval_loss": 0.6245400309562683,
1367
+ "eval_runtime": 81.0137,
1368
+ "eval_samples_per_second": 24.687,
1369
+ "eval_steps_per_second": 12.344,
1370
+ "step": 10000
1371
+ },
1372
+ {
1373
+ "epoch": 0.28960501693865165,
1374
+ "learning_rate": 4.0603257087987025e-05,
1375
+ "loss": 0.5999,
1376
+ "step": 10050
1377
+ },
1378
+ {
1379
+ "epoch": 0.2910458379184459,
1380
+ "learning_rate": 4.0514169004893656e-05,
1381
+ "loss": 0.5977,
1382
+ "step": 10100
1383
+ },
1384
+ {
1385
+ "epoch": 0.2924866588982402,
1386
+ "learning_rate": 4.042475936788219e-05,
1387
+ "loss": 0.5891,
1388
+ "step": 10150
1389
+ },
1390
+ {
1391
+ "epoch": 0.2939274798780345,
1392
+ "learning_rate": 4.033503003009858e-05,
1393
+ "loss": 0.5896,
1394
+ "step": 10200
1395
+ },
1396
+ {
1397
+ "epoch": 0.2953683008578288,
1398
+ "learning_rate": 4.0244982851315006e-05,
1399
+ "loss": 0.5929,
1400
+ "step": 10250
1401
+ },
1402
+ {
1403
+ "epoch": 0.2968091218376231,
1404
+ "learning_rate": 4.015461969789138e-05,
1405
+ "loss": 0.6017,
1406
+ "step": 10300
1407
+ },
1408
+ {
1409
+ "epoch": 0.29824994281741735,
1410
+ "learning_rate": 4.006394244273666e-05,
1411
+ "loss": 0.5839,
1412
+ "step": 10350
1413
+ },
1414
+ {
1415
+ "epoch": 0.29969076379721166,
1416
+ "learning_rate": 3.9972952965270006e-05,
1417
+ "loss": 0.6026,
1418
+ "step": 10400
1419
+ },
1420
+ {
1421
+ "epoch": 0.3011315847770059,
1422
+ "learning_rate": 3.9881653151381884e-05,
1423
+ "loss": 0.5985,
1424
+ "step": 10450
1425
+ },
1426
+ {
1427
+ "epoch": 0.3025724057568002,
1428
+ "learning_rate": 3.9790044893394886e-05,
1429
+ "loss": 0.5996,
1430
+ "step": 10500
1431
+ },
1432
+ {
1433
+ "epoch": 0.3025724057568002,
1434
+ "eval_loss": 0.6157839894294739,
1435
+ "eval_runtime": 82.2023,
1436
+ "eval_samples_per_second": 24.33,
1437
+ "eval_steps_per_second": 12.165,
1438
+ "step": 10500
1439
+ },
1440
+ {
1441
+ "epoch": 0.30401322673659453,
1442
+ "learning_rate": 3.969813009002459e-05,
1443
+ "loss": 0.595,
1444
+ "step": 10550
1445
+ },
1446
+ {
1447
+ "epoch": 0.3054540477163888,
1448
+ "learning_rate": 3.960591064634017e-05,
1449
+ "loss": 0.59,
1450
+ "step": 10600
1451
+ },
1452
+ {
1453
+ "epoch": 0.3068948686961831,
1454
+ "learning_rate": 3.9513388473724935e-05,
1455
+ "loss": 0.5895,
1456
+ "step": 10650
1457
+ },
1458
+ {
1459
+ "epoch": 0.30833568967597735,
1460
+ "learning_rate": 3.942056548983666e-05,
1461
+ "loss": 0.595,
1462
+ "step": 10700
1463
+ },
1464
+ {
1465
+ "epoch": 0.30977651065577166,
1466
+ "learning_rate": 3.93274436185679e-05,
1467
+ "loss": 0.5867,
1468
+ "step": 10750
1469
+ },
1470
+ {
1471
+ "epoch": 0.3112173316355659,
1472
+ "learning_rate": 3.9234024790006085e-05,
1473
+ "loss": 0.5851,
1474
+ "step": 10800
1475
+ },
1476
+ {
1477
+ "epoch": 0.31265815261536023,
1478
+ "learning_rate": 3.9140310940393524e-05,
1479
+ "loss": 0.5963,
1480
+ "step": 10850
1481
+ },
1482
+ {
1483
+ "epoch": 0.31409897359515454,
1484
+ "learning_rate": 3.9046304012087265e-05,
1485
+ "loss": 0.5827,
1486
+ "step": 10900
1487
+ },
1488
+ {
1489
+ "epoch": 0.3155397945749488,
1490
+ "learning_rate": 3.895200595351883e-05,
1491
+ "loss": 0.5885,
1492
+ "step": 10950
1493
+ },
1494
+ {
1495
+ "epoch": 0.3169806155547431,
1496
+ "learning_rate": 3.885741871915386e-05,
1497
+ "loss": 0.5837,
1498
+ "step": 11000
1499
+ },
1500
+ {
1501
+ "epoch": 0.3169806155547431,
1502
+ "eval_loss": 0.616204023361206,
1503
+ "eval_runtime": 80.49,
1504
+ "eval_samples_per_second": 24.848,
1505
+ "eval_steps_per_second": 12.424,
1506
+ "step": 11000
1507
+ },
1508
+ {
1509
+ "epoch": 0.31842143653453736,
1510
+ "learning_rate": 3.87625442694516e-05,
1511
+ "loss": 0.6003,
1512
+ "step": 11050
1513
+ },
1514
+ {
1515
+ "epoch": 0.31986225751433167,
1516
+ "learning_rate": 3.866738457082423e-05,
1517
+ "loss": 0.5922,
1518
+ "step": 11100
1519
+ },
1520
+ {
1521
+ "epoch": 0.3213030784941259,
1522
+ "learning_rate": 3.857194159559613e-05,
1523
+ "loss": 0.5919,
1524
+ "step": 11150
1525
+ },
1526
+ {
1527
+ "epoch": 0.32274389947392024,
1528
+ "learning_rate": 3.8476217321963026e-05,
1529
+ "loss": 0.5888,
1530
+ "step": 11200
1531
+ },
1532
+ {
1533
+ "epoch": 0.32418472045371455,
1534
+ "learning_rate": 3.8380213733950957e-05,
1535
+ "loss": 0.5644,
1536
+ "step": 11250
1537
+ },
1538
+ {
1539
+ "epoch": 0.3256255414335088,
1540
+ "learning_rate": 3.828393282137515e-05,
1541
+ "loss": 0.5837,
1542
+ "step": 11300
1543
+ },
1544
+ {
1545
+ "epoch": 0.3270663624133031,
1546
+ "learning_rate": 3.81873765797988e-05,
1547
+ "loss": 0.6009,
1548
+ "step": 11350
1549
+ },
1550
+ {
1551
+ "epoch": 0.32850718339309737,
1552
+ "learning_rate": 3.8090547010491715e-05,
1553
+ "loss": 0.5814,
1554
+ "step": 11400
1555
+ },
1556
+ {
1557
+ "epoch": 0.3299480043728917,
1558
+ "learning_rate": 3.799344612038879e-05,
1559
+ "loss": 0.5964,
1560
+ "step": 11450
1561
+ },
1562
+ {
1563
+ "epoch": 0.33138882535268593,
1564
+ "learning_rate": 3.789607592204847e-05,
1565
+ "loss": 0.5922,
1566
+ "step": 11500
1567
+ },
1568
+ {
1569
+ "epoch": 0.33138882535268593,
1570
+ "eval_loss": 0.6145178079605103,
1571
+ "eval_runtime": 80.6686,
1572
+ "eval_samples_per_second": 24.793,
1573
+ "eval_steps_per_second": 12.396,
1574
+ "step": 11500
1575
+ },
1576
+ {
1577
+ "epoch": 0.33282964633248024,
1578
+ "learning_rate": 3.779843843361099e-05,
1579
+ "loss": 0.6064,
1580
+ "step": 11550
1581
+ },
1582
+ {
1583
+ "epoch": 0.33427046731227456,
1584
+ "learning_rate": 3.770053567875657e-05,
1585
+ "loss": 0.5772,
1586
+ "step": 11600
1587
+ },
1588
+ {
1589
+ "epoch": 0.3357112882920688,
1590
+ "learning_rate": 3.7602369686663474e-05,
1591
+ "loss": 0.5699,
1592
+ "step": 11650
1593
+ },
1594
+ {
1595
+ "epoch": 0.3371521092718631,
1596
+ "learning_rate": 3.7503942491965924e-05,
1597
+ "loss": 0.5905,
1598
+ "step": 11700
1599
+ },
1600
+ {
1601
+ "epoch": 0.3385929302516574,
1602
+ "learning_rate": 3.7405256134711976e-05,
1603
+ "loss": 0.576,
1604
+ "step": 11750
1605
+ },
1606
+ {
1607
+ "epoch": 0.3400337512314517,
1608
+ "learning_rate": 3.730631266032119e-05,
1609
+ "loss": 0.5911,
1610
+ "step": 11800
1611
+ },
1612
+ {
1613
+ "epoch": 0.34147457221124594,
1614
+ "learning_rate": 3.720711411954226e-05,
1615
+ "loss": 0.576,
1616
+ "step": 11850
1617
+ },
1618
+ {
1619
+ "epoch": 0.34291539319104025,
1620
+ "learning_rate": 3.710766256841051e-05,
1621
+ "loss": 0.5819,
1622
+ "step": 11900
1623
+ },
1624
+ {
1625
+ "epoch": 0.34435621417083456,
1626
+ "learning_rate": 3.7007960068205274e-05,
1627
+ "loss": 0.5831,
1628
+ "step": 11950
1629
+ },
1630
+ {
1631
+ "epoch": 0.3457970351506288,
1632
+ "learning_rate": 3.690800868540716e-05,
1633
+ "loss": 0.5828,
1634
+ "step": 12000
1635
+ },
1636
+ {
1637
+ "epoch": 0.3457970351506288,
1638
+ "eval_loss": 0.6077320575714111,
1639
+ "eval_runtime": 79.5341,
1640
+ "eval_samples_per_second": 25.146,
1641
+ "eval_steps_per_second": 12.573,
1642
+ "step": 12000
1643
+ },
1644
+ {
1645
+ "epoch": 0.34723785613042313,
1646
+ "learning_rate": 3.6807810491655264e-05,
1647
+ "loss": 0.5529,
1648
+ "step": 12050
1649
+ },
1650
+ {
1651
+ "epoch": 0.3486786771102174,
1652
+ "learning_rate": 3.670736756370417e-05,
1653
+ "loss": 0.5784,
1654
+ "step": 12100
1655
+ },
1656
+ {
1657
+ "epoch": 0.3501194980900117,
1658
+ "learning_rate": 3.660668198338095e-05,
1659
+ "loss": 0.5776,
1660
+ "step": 12150
1661
+ },
1662
+ {
1663
+ "epoch": 0.35156031906980595,
1664
+ "learning_rate": 3.650575583754201e-05,
1665
+ "loss": 0.5754,
1666
+ "step": 12200
1667
+ },
1668
+ {
1669
+ "epoch": 0.35300114004960026,
1670
+ "learning_rate": 3.640459121802981e-05,
1671
+ "loss": 0.5754,
1672
+ "step": 12250
1673
+ },
1674
+ {
1675
+ "epoch": 0.35444196102939457,
1676
+ "learning_rate": 3.6303190221629546e-05,
1677
+ "loss": 0.5667,
1678
+ "step": 12300
1679
+ },
1680
+ {
1681
+ "epoch": 0.3558827820091888,
1682
+ "learning_rate": 3.620155495002566e-05,
1683
+ "loss": 0.5636,
1684
+ "step": 12350
1685
+ },
1686
+ {
1687
+ "epoch": 0.35732360298898314,
1688
+ "learning_rate": 3.609968750975829e-05,
1689
+ "loss": 0.5682,
1690
+ "step": 12400
1691
+ },
1692
+ {
1693
+ "epoch": 0.3587644239687774,
1694
+ "learning_rate": 3.599759001217964e-05,
1695
+ "loss": 0.5697,
1696
+ "step": 12450
1697
+ },
1698
+ {
1699
+ "epoch": 0.3602052449485717,
1700
+ "learning_rate": 3.589526457341014e-05,
1701
+ "loss": 0.5721,
1702
+ "step": 12500
1703
+ },
1704
+ {
1705
+ "epoch": 0.3602052449485717,
1706
+ "eval_loss": 0.6059696674346924,
1707
+ "eval_runtime": 79.5408,
1708
+ "eval_samples_per_second": 25.144,
1709
+ "eval_steps_per_second": 12.572,
1710
+ "step": 12500
1711
+ },
1712
+ {
1713
+ "epoch": 0.36164606592836596,
1714
+ "learning_rate": 3.5792713314294654e-05,
1715
+ "loss": 0.5777,
1716
+ "step": 12550
1717
+ },
1718
+ {
1719
+ "epoch": 0.36308688690816027,
1720
+ "learning_rate": 3.568993836035854e-05,
1721
+ "loss": 0.564,
1722
+ "step": 12600
1723
+ },
1724
+ {
1725
+ "epoch": 0.3645277078879546,
1726
+ "learning_rate": 3.558694184176351e-05,
1727
+ "loss": 0.5855,
1728
+ "step": 12650
1729
+ },
1730
+ {
1731
+ "epoch": 0.36596852886774883,
1732
+ "learning_rate": 3.548372589326356e-05,
1733
+ "loss": 0.5532,
1734
+ "step": 12700
1735
+ },
1736
+ {
1737
+ "epoch": 0.36740934984754314,
1738
+ "learning_rate": 3.5380292654160674e-05,
1739
+ "loss": 0.5683,
1740
+ "step": 12750
1741
+ },
1742
+ {
1743
+ "epoch": 0.3688501708273374,
1744
+ "learning_rate": 3.5276644268260505e-05,
1745
+ "loss": 0.5611,
1746
+ "step": 12800
1747
+ },
1748
+ {
1749
+ "epoch": 0.3702909918071317,
1750
+ "learning_rate": 3.517278288382795e-05,
1751
+ "loss": 0.5507,
1752
+ "step": 12850
1753
+ },
1754
+ {
1755
+ "epoch": 0.37173181278692596,
1756
+ "learning_rate": 3.5068710653542615e-05,
1757
+ "loss": 0.5502,
1758
+ "step": 12900
1759
+ },
1760
+ {
1761
+ "epoch": 0.3731726337667203,
1762
+ "learning_rate": 3.496442973445417e-05,
1763
+ "loss": 0.5758,
1764
+ "step": 12950
1765
+ },
1766
+ {
1767
+ "epoch": 0.3746134547465146,
1768
+ "learning_rate": 3.4859942287937706e-05,
1769
+ "loss": 0.5575,
1770
+ "step": 13000
1771
+ },
1772
+ {
1773
+ "epoch": 0.3746134547465146,
1774
+ "eval_loss": 0.6036191582679749,
1775
+ "eval_runtime": 80.0548,
1776
+ "eval_samples_per_second": 24.983,
1777
+ "eval_steps_per_second": 12.491,
1778
+ "step": 13000
1779
+ },
1780
+ {
1781
+ "epoch": 0.37605427572630884,
1782
+ "learning_rate": 3.475525047964887e-05,
1783
+ "loss": 0.5745,
1784
+ "step": 13050
1785
+ },
1786
+ {
1787
+ "epoch": 0.37749509670610315,
1788
+ "learning_rate": 3.4650356479479e-05,
1789
+ "loss": 0.5601,
1790
+ "step": 13100
1791
+ },
1792
+ {
1793
+ "epoch": 0.3789359176858974,
1794
+ "learning_rate": 3.454526246151019e-05,
1795
+ "loss": 0.5612,
1796
+ "step": 13150
1797
+ },
1798
+ {
1799
+ "epoch": 0.3803767386656917,
1800
+ "learning_rate": 3.443997060397015e-05,
1801
+ "loss": 0.5699,
1802
+ "step": 13200
1803
+ },
1804
+ {
1805
+ "epoch": 0.38181755964548597,
1806
+ "learning_rate": 3.4334483089187163e-05,
1807
+ "loss": 0.5648,
1808
+ "step": 13250
1809
+ },
1810
+ {
1811
+ "epoch": 0.3832583806252803,
1812
+ "learning_rate": 3.422880210354475e-05,
1813
+ "loss": 0.5664,
1814
+ "step": 13300
1815
+ },
1816
+ {
1817
+ "epoch": 0.3846992016050746,
1818
+ "learning_rate": 3.412292983743642e-05,
1819
+ "loss": 0.558,
1820
+ "step": 13350
1821
+ },
1822
+ {
1823
+ "epoch": 0.38614002258486885,
1824
+ "learning_rate": 3.401686848522023e-05,
1825
+ "loss": 0.5625,
1826
+ "step": 13400
1827
+ },
1828
+ {
1829
+ "epoch": 0.38758084356466316,
1830
+ "learning_rate": 3.391062024517336e-05,
1831
+ "loss": 0.5511,
1832
+ "step": 13450
1833
+ },
1834
+ {
1835
+ "epoch": 0.3890216645444574,
1836
+ "learning_rate": 3.3804187319446484e-05,
1837
+ "loss": 0.5649,
1838
+ "step": 13500
1839
+ },
1840
+ {
1841
+ "epoch": 0.3890216645444574,
1842
+ "eval_loss": 0.6038739085197449,
1843
+ "eval_runtime": 79.3792,
1844
+ "eval_samples_per_second": 25.196,
1845
+ "eval_steps_per_second": 12.598,
1846
+ "step": 13500
1847
+ },
1848
+ {
1849
+ "epoch": 0.3904624855242517,
1850
+ "learning_rate": 3.3697571914018164e-05,
1851
+ "loss": 0.5447,
1852
+ "step": 13550
1853
+ },
1854
+ {
1855
+ "epoch": 0.391903306504046,
1856
+ "learning_rate": 3.359077623864913e-05,
1857
+ "loss": 0.5457,
1858
+ "step": 13600
1859
+ },
1860
+ {
1861
+ "epoch": 0.3933441274838403,
1862
+ "learning_rate": 3.348380250683647e-05,
1863
+ "loss": 0.5455,
1864
+ "step": 13650
1865
+ },
1866
+ {
1867
+ "epoch": 0.3947849484636346,
1868
+ "learning_rate": 3.337665293576773e-05,
1869
+ "loss": 0.5402,
1870
+ "step": 13700
1871
+ },
1872
+ {
1873
+ "epoch": 0.39622576944342885,
1874
+ "learning_rate": 3.326932974627501e-05,
1875
+ "loss": 0.554,
1876
+ "step": 13750
1877
+ },
1878
+ {
1879
+ "epoch": 0.39766659042322317,
1880
+ "learning_rate": 3.316183516278891e-05,
1881
+ "loss": 0.5501,
1882
+ "step": 13800
1883
+ },
1884
+ {
1885
+ "epoch": 0.3991074114030174,
1886
+ "learning_rate": 3.30541714132924e-05,
1887
+ "loss": 0.5537,
1888
+ "step": 13850
1889
+ },
1890
+ {
1891
+ "epoch": 0.40054823238281173,
1892
+ "learning_rate": 3.294634072927468e-05,
1893
+ "loss": 0.5395,
1894
+ "step": 13900
1895
+ },
1896
+ {
1897
+ "epoch": 0.401989053362606,
1898
+ "learning_rate": 3.28383453456849e-05,
1899
+ "loss": 0.5496,
1900
+ "step": 13950
1901
+ },
1902
+ {
1903
+ "epoch": 0.4034298743424003,
1904
+ "learning_rate": 3.2730187500885864e-05,
1905
+ "loss": 0.557,
1906
+ "step": 14000
1907
+ },
1908
+ {
1909
+ "epoch": 0.4034298743424003,
1910
+ "eval_loss": 0.6013383865356445,
1911
+ "eval_runtime": 80.0388,
1912
+ "eval_samples_per_second": 24.988,
1913
+ "eval_steps_per_second": 12.494,
1914
+ "step": 14000
1915
+ },
1916
+ {
1917
+ "epoch": 0.4048706953221946,
1918
+ "learning_rate": 3.262186943660761e-05,
1919
+ "loss": 0.5464,
1920
+ "step": 14050
1921
+ },
1922
+ {
1923
+ "epoch": 0.40631151630198886,
1924
+ "learning_rate": 3.251339339790096e-05,
1925
+ "loss": 0.5635,
1926
+ "step": 14100
1927
+ },
1928
+ {
1929
+ "epoch": 0.4077523372817832,
1930
+ "learning_rate": 3.2404761633091004e-05,
1931
+ "loss": 0.5543,
1932
+ "step": 14150
1933
+ },
1934
+ {
1935
+ "epoch": 0.4091931582615774,
1936
+ "learning_rate": 3.229597639373047e-05,
1937
+ "loss": 0.5635,
1938
+ "step": 14200
1939
+ },
1940
+ {
1941
+ "epoch": 0.41063397924137174,
1942
+ "learning_rate": 3.2187039934553076e-05,
1943
+ "loss": 0.5547,
1944
+ "step": 14250
1945
+ },
1946
+ {
1947
+ "epoch": 0.412074800221166,
1948
+ "learning_rate": 3.207795451342679e-05,
1949
+ "loss": 0.5631,
1950
+ "step": 14300
1951
+ },
1952
+ {
1953
+ "epoch": 0.4135156212009603,
1954
+ "learning_rate": 3.196872239130704e-05,
1955
+ "loss": 0.5431,
1956
+ "step": 14350
1957
+ },
1958
+ {
1959
+ "epoch": 0.4149564421807546,
1960
+ "learning_rate": 3.1859345832189845e-05,
1961
+ "loss": 0.5519,
1962
+ "step": 14400
1963
+ },
1964
+ {
1965
+ "epoch": 0.41639726316054887,
1966
+ "learning_rate": 3.174982710306489e-05,
1967
+ "loss": 0.5447,
1968
+ "step": 14450
1969
+ },
1970
+ {
1971
+ "epoch": 0.4178380841403432,
1972
+ "learning_rate": 3.164016847386854e-05,
1973
+ "loss": 0.5581,
1974
+ "step": 14500
1975
+ },
1976
+ {
1977
+ "epoch": 0.4178380841403432,
1978
+ "eval_loss": 0.5996755957603455,
1979
+ "eval_runtime": 80.3224,
1980
+ "eval_samples_per_second": 24.9,
1981
+ "eval_steps_per_second": 12.45,
1982
+ "step": 14500
1983
+ },
1984
+ {
1985
+ "epoch": 0.41927890512013744,
1986
+ "learning_rate": 3.1530372217436795e-05,
1987
+ "loss": 0.546,
1988
+ "step": 14550
1989
+ },
1990
+ {
1991
+ "epoch": 0.42071972609993175,
1992
+ "learning_rate": 3.142044060945819e-05,
1993
+ "loss": 0.5568,
1994
+ "step": 14600
1995
+ },
1996
+ {
1997
+ "epoch": 0.422160547079726,
1998
+ "learning_rate": 3.1310375928426616e-05,
1999
+ "loss": 0.5446,
2000
+ "step": 14650
2001
+ },
2002
+ {
2003
+ "epoch": 0.4236013680595203,
2004
+ "learning_rate": 3.120018045559409e-05,
2005
+ "loss": 0.5335,
2006
+ "step": 14700
2007
+ },
2008
+ {
2009
+ "epoch": 0.4250421890393146,
2010
+ "learning_rate": 3.10898564749235e-05,
2011
+ "loss": 0.5446,
2012
+ "step": 14750
2013
+ },
2014
+ {
2015
+ "epoch": 0.4264830100191089,
2016
+ "learning_rate": 3.097940627304122e-05,
2017
+ "loss": 0.532,
2018
+ "step": 14800
2019
+ },
2020
+ {
2021
+ "epoch": 0.4279238309989032,
2022
+ "learning_rate": 3.0868832139189766e-05,
2023
+ "loss": 0.5545,
2024
+ "step": 14850
2025
+ },
2026
+ {
2027
+ "epoch": 0.42936465197869744,
2028
+ "learning_rate": 3.075813636518031e-05,
2029
+ "loss": 0.5482,
2030
+ "step": 14900
2031
+ },
2032
+ {
2033
+ "epoch": 0.43080547295849175,
2034
+ "learning_rate": 3.06473212453452e-05,
2035
+ "loss": 0.5297,
2036
+ "step": 14950
2037
+ },
2038
+ {
2039
+ "epoch": 0.432246293938286,
2040
+ "learning_rate": 3.053638907649042e-05,
2041
+ "loss": 0.5371,
2042
+ "step": 15000
2043
+ },
2044
+ {
2045
+ "epoch": 0.432246293938286,
2046
+ "eval_loss": 0.5982165336608887,
2047
+ "eval_runtime": 79.6872,
2048
+ "eval_samples_per_second": 25.098,
2049
+ "eval_steps_per_second": 12.549,
2050
+ "step": 15000
2051
+ },
2052
+ {
2053
+ "epoch": 0.4336871149180803,
2054
+ "learning_rate": 3.0425342157847957e-05,
2055
+ "loss": 0.5467,
2056
+ "step": 15050
2057
+ },
2058
+ {
2059
+ "epoch": 0.43512793589787463,
2060
+ "learning_rate": 3.0314182791028128e-05,
2061
+ "loss": 0.5436,
2062
+ "step": 15100
2063
+ },
2064
+ {
2065
+ "epoch": 0.4365687568776689,
2066
+ "learning_rate": 3.0202913279971935e-05,
2067
+ "loss": 0.5317,
2068
+ "step": 15150
2069
+ },
2070
+ {
2071
+ "epoch": 0.4380095778574632,
2072
+ "learning_rate": 3.009153593090327e-05,
2073
+ "loss": 0.5357,
2074
+ "step": 15200
2075
+ },
2076
+ {
2077
+ "epoch": 0.43945039883725745,
2078
+ "learning_rate": 2.9980053052281126e-05,
2079
+ "loss": 0.5528,
2080
+ "step": 15250
2081
+ },
2082
+ {
2083
+ "epoch": 0.44089121981705176,
2084
+ "learning_rate": 2.9868466954751758e-05,
2085
+ "loss": 0.541,
2086
+ "step": 15300
2087
+ },
2088
+ {
2089
+ "epoch": 0.442332040796846,
2090
+ "learning_rate": 2.9756779951100766e-05,
2091
+ "loss": 0.5447,
2092
+ "step": 15350
2093
+ },
2094
+ {
2095
+ "epoch": 0.4437728617766403,
2096
+ "learning_rate": 2.9644994356205207e-05,
2097
+ "loss": 0.5467,
2098
+ "step": 15400
2099
+ },
2100
+ {
2101
+ "epoch": 0.44521368275643464,
2102
+ "learning_rate": 2.953311248698556e-05,
2103
+ "loss": 0.5374,
2104
+ "step": 15450
2105
+ },
2106
+ {
2107
+ "epoch": 0.4466545037362289,
2108
+ "learning_rate": 2.942113666235774e-05,
2109
+ "loss": 0.5328,
2110
+ "step": 15500
2111
+ },
2112
+ {
2113
+ "epoch": 0.4466545037362289,
2114
+ "eval_loss": 0.5987485647201538,
2115
+ "eval_runtime": 79.3166,
2116
+ "eval_samples_per_second": 25.215,
2117
+ "eval_steps_per_second": 12.608,
2118
+ "step": 15500
2119
+ },
2120
+ {
2121
+ "epoch": 0.4480953247160232,
2122
+ "learning_rate": 2.9309069203185026e-05,
2123
+ "loss": 0.5398,
2124
+ "step": 15550
2125
+ },
2126
+ {
2127
+ "epoch": 0.44953614569581746,
2128
+ "learning_rate": 2.919691243222996e-05,
2129
+ "loss": 0.5469,
2130
+ "step": 15600
2131
+ },
2132
+ {
2133
+ "epoch": 0.45097696667561177,
2134
+ "learning_rate": 2.9084668674106196e-05,
2135
+ "loss": 0.5552,
2136
+ "step": 15650
2137
+ },
2138
+ {
2139
+ "epoch": 0.452417787655406,
2140
+ "learning_rate": 2.8972340255230322e-05,
2141
+ "loss": 0.5379,
2142
+ "step": 15700
2143
+ },
2144
+ {
2145
+ "epoch": 0.45385860863520033,
2146
+ "learning_rate": 2.8859929503773652e-05,
2147
+ "loss": 0.5358,
2148
+ "step": 15750
2149
+ },
2150
+ {
2151
+ "epoch": 0.45529942961499464,
2152
+ "learning_rate": 2.874743874961397e-05,
2153
+ "loss": 0.5391,
2154
+ "step": 15800
2155
+ },
2156
+ {
2157
+ "epoch": 0.4567402505947889,
2158
+ "learning_rate": 2.863487032428722e-05,
2159
+ "loss": 0.5369,
2160
+ "step": 15850
2161
+ },
2162
+ {
2163
+ "epoch": 0.4581810715745832,
2164
+ "learning_rate": 2.85222265609392e-05,
2165
+ "loss": 0.5273,
2166
+ "step": 15900
2167
+ },
2168
+ {
2169
+ "epoch": 0.45962189255437746,
2170
+ "learning_rate": 2.8409509794277195e-05,
2171
+ "loss": 0.5384,
2172
+ "step": 15950
2173
+ },
2174
+ {
2175
+ "epoch": 0.4610627135341718,
2176
+ "learning_rate": 2.8296722360521593e-05,
2177
+ "loss": 0.5508,
2178
+ "step": 16000
2179
+ },
2180
+ {
2181
+ "epoch": 0.4610627135341718,
2182
+ "eval_loss": 0.5947248339653015,
2183
+ "eval_runtime": 79.6937,
2184
+ "eval_samples_per_second": 25.096,
2185
+ "eval_steps_per_second": 12.548,
2186
+ "step": 16000
2187
+ },
2188
+ {
2189
+ "epoch": 0.46250353451396603,
2190
+ "learning_rate": 2.8183866597357467e-05,
2191
+ "loss": 0.5199,
2192
+ "step": 16050
2193
+ },
2194
+ {
2195
+ "epoch": 0.46394435549376034,
2196
+ "learning_rate": 2.8070944843886104e-05,
2197
+ "loss": 0.5433,
2198
+ "step": 16100
2199
+ },
2200
+ {
2201
+ "epoch": 0.46538517647355465,
2202
+ "learning_rate": 2.7957959440576564e-05,
2203
+ "loss": 0.5167,
2204
+ "step": 16150
2205
+ },
2206
+ {
2207
+ "epoch": 0.4668259974533489,
2208
+ "learning_rate": 2.7844912729217116e-05,
2209
+ "loss": 0.5487,
2210
+ "step": 16200
2211
+ },
2212
+ {
2213
+ "epoch": 0.4682668184331432,
2214
+ "learning_rate": 2.773180705286673e-05,
2215
+ "loss": 0.5448,
2216
+ "step": 16250
2217
+ },
2218
+ {
2219
+ "epoch": 0.46970763941293747,
2220
+ "learning_rate": 2.7618644755806527e-05,
2221
+ "loss": 0.5407,
2222
+ "step": 16300
2223
+ },
2224
+ {
2225
+ "epoch": 0.4711484603927318,
2226
+ "learning_rate": 2.7505428183491168e-05,
2227
+ "loss": 0.5253,
2228
+ "step": 16350
2229
+ },
2230
+ {
2231
+ "epoch": 0.47258928137252604,
2232
+ "learning_rate": 2.7392159682500246e-05,
2233
+ "loss": 0.5276,
2234
+ "step": 16400
2235
+ },
2236
+ {
2237
+ "epoch": 0.47403010235232035,
2238
+ "learning_rate": 2.7278841600489652e-05,
2239
+ "loss": 0.5529,
2240
+ "step": 16450
2241
+ },
2242
+ {
2243
+ "epoch": 0.47547092333211466,
2244
+ "learning_rate": 2.7165476286142922e-05,
2245
+ "loss": 0.5293,
2246
+ "step": 16500
2247
+ },
2248
+ {
2249
+ "epoch": 0.47547092333211466,
2250
+ "eval_loss": 0.5968120098114014,
2251
+ "eval_runtime": 79.5645,
2252
+ "eval_samples_per_second": 25.137,
2253
+ "eval_steps_per_second": 12.568,
2254
+ "step": 16500
2255
+ },
2256
+ {
2257
+ "epoch": 0.4769117443119089,
2258
+ "learning_rate": 2.7052066089122545e-05,
2259
+ "loss": 0.5275,
2260
+ "step": 16550
2261
+ },
2262
+ {
2263
+ "epoch": 0.4783525652917032,
2264
+ "learning_rate": 2.6938613360021277e-05,
2265
+ "loss": 0.5272,
2266
+ "step": 16600
2267
+ },
2268
+ {
2269
+ "epoch": 0.4797933862714975,
2270
+ "learning_rate": 2.68251204503134e-05,
2271
+ "loss": 0.5164,
2272
+ "step": 16650
2273
+ },
2274
+ {
2275
+ "epoch": 0.4812342072512918,
2276
+ "learning_rate": 2.6711589712306018e-05,
2277
+ "loss": 0.5099,
2278
+ "step": 16700
2279
+ },
2280
+ {
2281
+ "epoch": 0.48267502823108605,
2282
+ "learning_rate": 2.6598023499090262e-05,
2283
+ "loss": 0.5379,
2284
+ "step": 16750
2285
+ },
2286
+ {
2287
+ "epoch": 0.48411584921088036,
2288
+ "learning_rate": 2.6484424164492554e-05,
2289
+ "loss": 0.5284,
2290
+ "step": 16800
2291
+ },
2292
+ {
2293
+ "epoch": 0.48555667019067467,
2294
+ "learning_rate": 2.6370794063025796e-05,
2295
+ "loss": 0.5178,
2296
+ "step": 16850
2297
+ },
2298
+ {
2299
+ "epoch": 0.4869974911704689,
2300
+ "learning_rate": 2.6257135549840574e-05,
2301
+ "loss": 0.5271,
2302
+ "step": 16900
2303
+ },
2304
+ {
2305
+ "epoch": 0.48843831215026323,
2306
+ "learning_rate": 2.614345098067637e-05,
2307
+ "loss": 0.534,
2308
+ "step": 16950
2309
+ },
2310
+ {
2311
+ "epoch": 0.4898791331300575,
2312
+ "learning_rate": 2.60297427118127e-05,
2313
+ "loss": 0.5394,
2314
+ "step": 17000
2315
+ },
2316
+ {
2317
+ "epoch": 0.4898791331300575,
2318
+ "eval_loss": 0.5935970544815063,
2319
+ "eval_runtime": 79.5625,
2320
+ "eval_samples_per_second": 25.137,
2321
+ "eval_steps_per_second": 12.569,
2322
+ "step": 17000
2323
+ },
2324
+ {
2325
+ "epoch": 0.4913199541098518,
2326
+ "learning_rate": 2.5916013100020276e-05,
2327
+ "loss": 0.5348,
2328
+ "step": 17050
2329
+ },
2330
+ {
2331
+ "epoch": 0.4927607750896461,
2332
+ "learning_rate": 2.5802264502512203e-05,
2333
+ "loss": 0.5168,
2334
+ "step": 17100
2335
+ },
2336
+ {
2337
+ "epoch": 0.49420159606944036,
2338
+ "learning_rate": 2.5688499276895067e-05,
2339
+ "loss": 0.5137,
2340
+ "step": 17150
2341
+ },
2342
+ {
2343
+ "epoch": 0.4956424170492347,
2344
+ "learning_rate": 2.5574719781120123e-05,
2345
+ "loss": 0.522,
2346
+ "step": 17200
2347
+ },
2348
+ {
2349
+ "epoch": 0.49708323802902893,
2350
+ "learning_rate": 2.546092837343437e-05,
2351
+ "loss": 0.5327,
2352
+ "step": 17250
2353
+ },
2354
+ {
2355
+ "epoch": 0.49852405900882324,
2356
+ "learning_rate": 2.5347127412331712e-05,
2357
+ "loss": 0.5219,
2358
+ "step": 17300
2359
+ },
2360
+ {
2361
+ "epoch": 0.4999648799886175,
2362
+ "learning_rate": 2.5233319256504056e-05,
2363
+ "loss": 0.5225,
2364
+ "step": 17350
2365
+ },
2366
+ {
2367
+ "epoch": 0.5014057009684117,
2368
+ "learning_rate": 2.5119506264792442e-05,
2369
+ "loss": 0.5031,
2370
+ "step": 17400
2371
+ },
2372
+ {
2373
+ "epoch": 0.5028465219482061,
2374
+ "learning_rate": 2.5005690796138125e-05,
2375
+ "loss": 0.5083,
2376
+ "step": 17450
2377
+ },
2378
+ {
2379
+ "epoch": 0.5042873429280004,
2380
+ "learning_rate": 2.48918752095337e-05,
2381
+ "loss": 0.5071,
2382
+ "step": 17500
2383
+ },
2384
+ {
2385
+ "epoch": 0.5042873429280004,
2386
+ "eval_loss": 0.5912173986434937,
2387
+ "eval_runtime": 80.629,
2388
+ "eval_samples_per_second": 24.805,
2389
+ "eval_steps_per_second": 12.402,
2390
+ "step": 17500
2391
+ },
2392
+ {
2393
+ "epoch": 0.5057281639077946,
2394
+ "learning_rate": 2.4778061863974236e-05,
2395
+ "loss": 0.5202,
2396
+ "step": 17550
2397
+ },
2398
+ {
2399
+ "epoch": 0.507168984887589,
2400
+ "learning_rate": 2.4664253118408303e-05,
2401
+ "loss": 0.5196,
2402
+ "step": 17600
2403
+ },
2404
+ {
2405
+ "epoch": 0.5086098058673832,
2406
+ "learning_rate": 2.4550451331689165e-05,
2407
+ "loss": 0.5207,
2408
+ "step": 17650
2409
+ },
2410
+ {
2411
+ "epoch": 0.5100506268471775,
2412
+ "learning_rate": 2.4436658862525845e-05,
2413
+ "loss": 0.5239,
2414
+ "step": 17700
2415
+ },
2416
+ {
2417
+ "epoch": 0.5114914478269718,
2418
+ "learning_rate": 2.4322878069434245e-05,
2419
+ "loss": 0.5311,
2420
+ "step": 17750
2421
+ },
2422
+ {
2423
+ "epoch": 0.5129322688067661,
2424
+ "learning_rate": 2.4209111310688265e-05,
2425
+ "loss": 0.511,
2426
+ "step": 17800
2427
+ },
2428
+ {
2429
+ "epoch": 0.5143730897865604,
2430
+ "learning_rate": 2.4095360944270917e-05,
2431
+ "loss": 0.5054,
2432
+ "step": 17850
2433
+ },
2434
+ {
2435
+ "epoch": 0.5158139107663546,
2436
+ "learning_rate": 2.398162932782547e-05,
2437
+ "loss": 0.518,
2438
+ "step": 17900
2439
+ },
2440
+ {
2441
+ "epoch": 0.517254731746149,
2442
+ "learning_rate": 2.3867918818606554e-05,
2443
+ "loss": 0.5269,
2444
+ "step": 17950
2445
+ },
2446
+ {
2447
+ "epoch": 0.5186955527259433,
2448
+ "learning_rate": 2.3754231773431336e-05,
2449
+ "loss": 0.5332,
2450
+ "step": 18000
2451
+ },
2452
+ {
2453
+ "epoch": 0.5186955527259433,
2454
+ "eval_loss": 0.5898262858390808,
2455
+ "eval_runtime": 80.1046,
2456
+ "eval_samples_per_second": 24.967,
2457
+ "eval_steps_per_second": 12.484,
2458
+ "step": 18000
2459
+ },
2460
+ {
2461
+ "epoch": 0.5201363737057375,
2462
+ "learning_rate": 2.3640570548630653e-05,
2463
+ "loss": 0.5313,
2464
+ "step": 18050
2465
+ },
2466
+ {
2467
+ "epoch": 0.5215771946855318,
2468
+ "learning_rate": 2.3526937500000172e-05,
2469
+ "loss": 0.5333,
2470
+ "step": 18100
2471
+ },
2472
+ {
2473
+ "epoch": 0.5230180156653261,
2474
+ "learning_rate": 2.341333498275157e-05,
2475
+ "loss": 0.5186,
2476
+ "step": 18150
2477
+ },
2478
+ {
2479
+ "epoch": 0.5244588366451204,
2480
+ "learning_rate": 2.3299765351463725e-05,
2481
+ "loss": 0.5167,
2482
+ "step": 18200
2483
+ },
2484
+ {
2485
+ "epoch": 0.5258996576249146,
2486
+ "learning_rate": 2.318623096003389e-05,
2487
+ "loss": 0.5161,
2488
+ "step": 18250
2489
+ },
2490
+ {
2491
+ "epoch": 0.527340478604709,
2492
+ "learning_rate": 2.3072734161628916e-05,
2493
+ "loss": 0.5015,
2494
+ "step": 18300
2495
+ },
2496
+ {
2497
+ "epoch": 0.5287812995845033,
2498
+ "learning_rate": 2.2959277308636518e-05,
2499
+ "loss": 0.4986,
2500
+ "step": 18350
2501
+ },
2502
+ {
2503
+ "epoch": 0.5302221205642975,
2504
+ "learning_rate": 2.2845862752616455e-05,
2505
+ "loss": 0.502,
2506
+ "step": 18400
2507
+ },
2508
+ {
2509
+ "epoch": 0.5316629415440918,
2510
+ "learning_rate": 2.2732492844251817e-05,
2511
+ "loss": 0.5102,
2512
+ "step": 18450
2513
+ },
2514
+ {
2515
+ "epoch": 0.5331037625238861,
2516
+ "learning_rate": 2.261916993330032e-05,
2517
+ "loss": 0.5026,
2518
+ "step": 18500
2519
+ },
2520
+ {
2521
+ "epoch": 0.5331037625238861,
2522
+ "eval_loss": 0.5886390805244446,
2523
+ "eval_runtime": 80.592,
2524
+ "eval_samples_per_second": 24.816,
2525
+ "eval_steps_per_second": 12.408,
2526
+ "step": 18500
2527
+ },
2528
+ {
2529
+ "epoch": 0.5345445835036804,
2530
+ "learning_rate": 2.2505896368545577e-05,
2531
+ "loss": 0.5174,
2532
+ "step": 18550
2533
+ },
2534
+ {
2535
+ "epoch": 0.5359854044834746,
2536
+ "learning_rate": 2.2392674497748438e-05,
2537
+ "loss": 0.5185,
2538
+ "step": 18600
2539
+ },
2540
+ {
2541
+ "epoch": 0.537426225463269,
2542
+ "learning_rate": 2.2279506667598317e-05,
2543
+ "loss": 0.5027,
2544
+ "step": 18650
2545
+ },
2546
+ {
2547
+ "epoch": 0.5388670464430633,
2548
+ "learning_rate": 2.2166395223664553e-05,
2549
+ "loss": 0.5035,
2550
+ "step": 18700
2551
+ },
2552
+ {
2553
+ "epoch": 0.5403078674228575,
2554
+ "learning_rate": 2.2053342510347816e-05,
2555
+ "loss": 0.499,
2556
+ "step": 18750
2557
+ },
2558
+ {
2559
+ "epoch": 0.5417486884026518,
2560
+ "learning_rate": 2.194035087083146e-05,
2561
+ "loss": 0.5136,
2562
+ "step": 18800
2563
+ },
2564
+ {
2565
+ "epoch": 0.5431895093824461,
2566
+ "learning_rate": 2.1827422647033033e-05,
2567
+ "loss": 0.5095,
2568
+ "step": 18850
2569
+ },
2570
+ {
2571
+ "epoch": 0.5446303303622404,
2572
+ "learning_rate": 2.1714560179555675e-05,
2573
+ "loss": 0.5086,
2574
+ "step": 18900
2575
+ },
2576
+ {
2577
+ "epoch": 0.5460711513420347,
2578
+ "learning_rate": 2.160176580763965e-05,
2579
+ "loss": 0.5063,
2580
+ "step": 18950
2581
+ },
2582
+ {
2583
+ "epoch": 0.547511972321829,
2584
+ "learning_rate": 2.1489041869113818e-05,
2585
+ "loss": 0.509,
2586
+ "step": 19000
2587
+ },
2588
+ {
2589
+ "epoch": 0.547511972321829,
2590
+ "eval_loss": 0.5874903798103333,
2591
+ "eval_runtime": 81.4283,
2592
+ "eval_samples_per_second": 24.561,
2593
+ "eval_steps_per_second": 12.281,
2594
+ "step": 19000
2595
+ },
2596
+ {
2597
+ "epoch": 0.5489527933016233,
2598
+ "learning_rate": 2.137639070034722e-05,
2599
+ "loss": 0.5107,
2600
+ "step": 19050
2601
+ },
2602
+ {
2603
+ "epoch": 0.5503936142814175,
2604
+ "learning_rate": 2.1263814636200632e-05,
2605
+ "loss": 0.521,
2606
+ "step": 19100
2607
+ },
2608
+ {
2609
+ "epoch": 0.5518344352612118,
2610
+ "learning_rate": 2.1151316009978183e-05,
2611
+ "loss": 0.493,
2612
+ "step": 19150
2613
+ },
2614
+ {
2615
+ "epoch": 0.5532752562410062,
2616
+ "learning_rate": 2.103889715337898e-05,
2617
+ "loss": 0.4991,
2618
+ "step": 19200
2619
+ },
2620
+ {
2621
+ "epoch": 0.5547160772208004,
2622
+ "learning_rate": 2.092656039644878e-05,
2623
+ "loss": 0.5057,
2624
+ "step": 19250
2625
+ },
2626
+ {
2627
+ "epoch": 0.5561568982005947,
2628
+ "learning_rate": 2.081430806753173e-05,
2629
+ "loss": 0.5132,
2630
+ "step": 19300
2631
+ },
2632
+ {
2633
+ "epoch": 0.557597719180389,
2634
+ "learning_rate": 2.0702142493222055e-05,
2635
+ "loss": 0.5083,
2636
+ "step": 19350
2637
+ },
2638
+ {
2639
+ "epoch": 0.5590385401601833,
2640
+ "learning_rate": 2.059006599831587e-05,
2641
+ "loss": 0.4962,
2642
+ "step": 19400
2643
+ },
2644
+ {
2645
+ "epoch": 0.5604793611399775,
2646
+ "learning_rate": 2.0478080905762998e-05,
2647
+ "loss": 0.513,
2648
+ "step": 19450
2649
+ },
2650
+ {
2651
+ "epoch": 0.5619201821197718,
2652
+ "learning_rate": 2.0366189536618806e-05,
2653
+ "loss": 0.5125,
2654
+ "step": 19500
2655
+ },
2656
+ {
2657
+ "epoch": 0.5619201821197718,
2658
+ "eval_loss": 0.5878966450691223,
2659
+ "eval_runtime": 79.8864,
2660
+ "eval_samples_per_second": 25.036,
2661
+ "eval_steps_per_second": 12.518,
2662
+ "step": 19500
2663
+ },
2664
+ {
2665
+ "epoch": 0.5633610030995662,
2666
+ "learning_rate": 2.0254394209996114e-05,
2667
+ "loss": 0.4956,
2668
+ "step": 19550
2669
+ },
2670
+ {
2671
+ "epoch": 0.5648018240793604,
2672
+ "learning_rate": 2.0142697243017118e-05,
2673
+ "loss": 0.4981,
2674
+ "step": 19600
2675
+ },
2676
+ {
2677
+ "epoch": 0.5662426450591547,
2678
+ "learning_rate": 2.003110095076535e-05,
2679
+ "loss": 0.5043,
2680
+ "step": 19650
2681
+ },
2682
+ {
2683
+ "epoch": 0.567683466038949,
2684
+ "learning_rate": 1.991960764623773e-05,
2685
+ "loss": 0.4866,
2686
+ "step": 19700
2687
+ },
2688
+ {
2689
+ "epoch": 0.5691242870187433,
2690
+ "learning_rate": 1.9808219640296596e-05,
2691
+ "loss": 0.4904,
2692
+ "step": 19750
2693
+ },
2694
+ {
2695
+ "epoch": 0.5705651079985375,
2696
+ "learning_rate": 1.9696939241621824e-05,
2697
+ "loss": 0.4968,
2698
+ "step": 19800
2699
+ },
2700
+ {
2701
+ "epoch": 0.5720059289783318,
2702
+ "learning_rate": 1.9585768756662972e-05,
2703
+ "loss": 0.4955,
2704
+ "step": 19850
2705
+ },
2706
+ {
2707
+ "epoch": 0.5734467499581262,
2708
+ "learning_rate": 1.9474710489591463e-05,
2709
+ "loss": 0.4988,
2710
+ "step": 19900
2711
+ },
2712
+ {
2713
+ "epoch": 0.5748875709379204,
2714
+ "learning_rate": 1.936376674225286e-05,
2715
+ "loss": 0.5132,
2716
+ "step": 19950
2717
+ },
2718
+ {
2719
+ "epoch": 0.5763283919177147,
2720
+ "learning_rate": 1.925293981411912e-05,
2721
+ "loss": 0.4903,
2722
+ "step": 20000
2723
+ },
2724
+ {
2725
+ "epoch": 0.5763283919177147,
2726
+ "eval_loss": 0.589536726474762,
2727
+ "eval_runtime": 79.6161,
2728
+ "eval_samples_per_second": 25.121,
2729
+ "eval_steps_per_second": 12.56,
2730
+ "step": 20000
2731
+ },
2732
+ {
2733
+ "epoch": 0.577769212897509,
2734
+ "learning_rate": 1.914223200224096e-05,
2735
+ "loss": 0.504,
2736
+ "step": 20050
2737
+ },
2738
+ {
2739
+ "epoch": 0.5792100338773033,
2740
+ "learning_rate": 1.9031645601200227e-05,
2741
+ "loss": 0.502,
2742
+ "step": 20100
2743
+ },
2744
+ {
2745
+ "epoch": 0.5806508548570976,
2746
+ "learning_rate": 1.8921182903062363e-05,
2747
+ "loss": 0.5119,
2748
+ "step": 20150
2749
+ },
2750
+ {
2751
+ "epoch": 0.5820916758368918,
2752
+ "learning_rate": 1.8810846197328873e-05,
2753
+ "loss": 0.5023,
2754
+ "step": 20200
2755
+ },
2756
+ {
2757
+ "epoch": 0.5835324968166862,
2758
+ "learning_rate": 1.8700637770889896e-05,
2759
+ "loss": 0.5141,
2760
+ "step": 20250
2761
+ },
2762
+ {
2763
+ "epoch": 0.5849733177964804,
2764
+ "learning_rate": 1.8590559907976784e-05,
2765
+ "loss": 0.4965,
2766
+ "step": 20300
2767
+ },
2768
+ {
2769
+ "epoch": 0.5864141387762747,
2770
+ "learning_rate": 1.8480614890114777e-05,
2771
+ "loss": 0.5073,
2772
+ "step": 20350
2773
+ },
2774
+ {
2775
+ "epoch": 0.587854959756069,
2776
+ "learning_rate": 1.8370804996075707e-05,
2777
+ "loss": 0.5037,
2778
+ "step": 20400
2779
+ },
2780
+ {
2781
+ "epoch": 0.5892957807358633,
2782
+ "learning_rate": 1.8261132501830763e-05,
2783
+ "loss": 0.5152,
2784
+ "step": 20450
2785
+ },
2786
+ {
2787
+ "epoch": 0.5907366017156576,
2788
+ "learning_rate": 1.815159968050332e-05,
2789
+ "loss": 0.5076,
2790
+ "step": 20500
2791
+ },
2792
+ {
2793
+ "epoch": 0.5907366017156576,
2794
+ "eval_loss": 0.5854496955871582,
2795
+ "eval_runtime": 81.6796,
2796
+ "eval_samples_per_second": 24.486,
2797
+ "eval_steps_per_second": 12.243,
2798
+ "step": 20500
2799
+ },
2800
+ {
2801
+ "epoch": 0.5921774226954518,
2802
+ "learning_rate": 1.8042208802321822e-05,
2803
+ "loss": 0.4845,
2804
+ "step": 20550
2805
+ },
2806
+ {
2807
+ "epoch": 0.5936182436752462,
2808
+ "learning_rate": 1.7932962134572744e-05,
2809
+ "loss": 0.5,
2810
+ "step": 20600
2811
+ },
2812
+ {
2813
+ "epoch": 0.5950590646550404,
2814
+ "learning_rate": 1.782386194155358e-05,
2815
+ "loss": 0.5003,
2816
+ "step": 20650
2817
+ },
2818
+ {
2819
+ "epoch": 0.5964998856348347,
2820
+ "learning_rate": 1.7714910484525925e-05,
2821
+ "loss": 0.4996,
2822
+ "step": 20700
2823
+ },
2824
+ {
2825
+ "epoch": 0.5979407066146291,
2826
+ "learning_rate": 1.760611002166861e-05,
2827
+ "loss": 0.5056,
2828
+ "step": 20750
2829
+ },
2830
+ {
2831
+ "epoch": 0.5993815275944233,
2832
+ "learning_rate": 1.749746280803087e-05,
2833
+ "loss": 0.4789,
2834
+ "step": 20800
2835
+ },
2836
+ {
2837
+ "epoch": 0.6008223485742176,
2838
+ "learning_rate": 1.7388971095485644e-05,
2839
+ "loss": 0.5089,
2840
+ "step": 20850
2841
+ },
2842
+ {
2843
+ "epoch": 0.6022631695540118,
2844
+ "learning_rate": 1.728063713268287e-05,
2845
+ "loss": 0.49,
2846
+ "step": 20900
2847
+ },
2848
+ {
2849
+ "epoch": 0.6037039905338062,
2850
+ "learning_rate": 1.7172463165002893e-05,
2851
+ "loss": 0.5004,
2852
+ "step": 20950
2853
+ },
2854
+ {
2855
+ "epoch": 0.6051448115136004,
2856
+ "learning_rate": 1.7064451434509943e-05,
2857
+ "loss": 0.4844,
2858
+ "step": 21000
2859
+ },
2860
+ {
2861
+ "epoch": 0.6051448115136004,
2862
+ "eval_loss": 0.5877842903137207,
2863
+ "eval_runtime": 81.851,
2864
+ "eval_samples_per_second": 24.435,
2865
+ "eval_steps_per_second": 12.217,
2866
+ "step": 21000
2867
+ },
2868
+ {
2869
+ "epoch": 0.6065856324933947,
2870
+ "learning_rate": 1.6956604179905618e-05,
2871
+ "loss": 0.5006,
2872
+ "step": 21050
2873
+ },
2874
+ {
2875
+ "epoch": 0.6080264534731891,
2876
+ "learning_rate": 1.684892363648252e-05,
2877
+ "loss": 0.503,
2878
+ "step": 21100
2879
+ },
2880
+ {
2881
+ "epoch": 0.6094672744529833,
2882
+ "learning_rate": 1.6741412036077927e-05,
2883
+ "loss": 0.5008,
2884
+ "step": 21150
2885
+ },
2886
+ {
2887
+ "epoch": 0.6109080954327776,
2888
+ "learning_rate": 1.6634071607027525e-05,
2889
+ "loss": 0.4878,
2890
+ "step": 21200
2891
+ },
2892
+ {
2893
+ "epoch": 0.6123489164125718,
2894
+ "learning_rate": 1.6526904574119213e-05,
2895
+ "loss": 0.4994,
2896
+ "step": 21250
2897
+ },
2898
+ {
2899
+ "epoch": 0.6137897373923662,
2900
+ "learning_rate": 1.6419913158547e-05,
2901
+ "loss": 0.4964,
2902
+ "step": 21300
2903
+ },
2904
+ {
2905
+ "epoch": 0.6152305583721605,
2906
+ "learning_rate": 1.631309957786498e-05,
2907
+ "loss": 0.4804,
2908
+ "step": 21350
2909
+ },
2910
+ {
2911
+ "epoch": 0.6166713793519547,
2912
+ "learning_rate": 1.620646604594135e-05,
2913
+ "loss": 0.509,
2914
+ "step": 21400
2915
+ },
2916
+ {
2917
+ "epoch": 0.6181122003317491,
2918
+ "learning_rate": 1.610001477291253e-05,
2919
+ "loss": 0.4863,
2920
+ "step": 21450
2921
+ },
2922
+ {
2923
+ "epoch": 0.6195530213115433,
2924
+ "learning_rate": 1.5993747965137344e-05,
2925
+ "loss": 0.5092,
2926
+ "step": 21500
2927
+ },
2928
+ {
2929
+ "epoch": 0.6195530213115433,
2930
+ "eval_loss": 0.582338273525238,
2931
+ "eval_runtime": 80.9155,
2932
+ "eval_samples_per_second": 24.717,
2933
+ "eval_steps_per_second": 12.359,
2934
+ "step": 21500
2935
+ },
2936
+ {
2937
+ "epoch": 0.6209938422913376,
2938
+ "learning_rate": 1.588766782515136e-05,
2939
+ "loss": 0.4863,
2940
+ "step": 21550
2941
+ },
2942
+ {
2943
+ "epoch": 0.6224346632711318,
2944
+ "learning_rate": 1.5781776551621106e-05,
2945
+ "loss": 0.4874,
2946
+ "step": 21600
2947
+ },
2948
+ {
2949
+ "epoch": 0.6238754842509262,
2950
+ "learning_rate": 1.5676076339298622e-05,
2951
+ "loss": 0.4979,
2952
+ "step": 21650
2953
+ },
2954
+ {
2955
+ "epoch": 0.6253163052307205,
2956
+ "learning_rate": 1.5570569378975908e-05,
2957
+ "loss": 0.4825,
2958
+ "step": 21700
2959
+ },
2960
+ {
2961
+ "epoch": 0.6267571262105147,
2962
+ "learning_rate": 1.5465257857439548e-05,
2963
+ "loss": 0.4943,
2964
+ "step": 21750
2965
+ },
2966
+ {
2967
+ "epoch": 0.6281979471903091,
2968
+ "learning_rate": 1.5360143957425345e-05,
2969
+ "loss": 0.4936,
2970
+ "step": 21800
2971
+ },
2972
+ {
2973
+ "epoch": 0.6296387681701033,
2974
+ "learning_rate": 1.525522985757314e-05,
2975
+ "loss": 0.4858,
2976
+ "step": 21850
2977
+ },
2978
+ {
2979
+ "epoch": 0.6310795891498976,
2980
+ "learning_rate": 1.5150517732381583e-05,
2981
+ "loss": 0.4797,
2982
+ "step": 21900
2983
+ },
2984
+ {
2985
+ "epoch": 0.6325204101296918,
2986
+ "learning_rate": 1.5046009752163132e-05,
2987
+ "loss": 0.4866,
2988
+ "step": 21950
2989
+ },
2990
+ {
2991
+ "epoch": 0.6339612311094862,
2992
+ "learning_rate": 1.4941708082999034e-05,
2993
+ "loss": 0.4677,
2994
+ "step": 22000
2995
+ },
2996
+ {
2997
+ "epoch": 0.6339612311094862,
2998
+ "eval_loss": 0.5860006213188171,
2999
+ "eval_runtime": 83.1333,
3000
+ "eval_samples_per_second": 24.058,
3001
+ "eval_steps_per_second": 12.029,
3002
+ "step": 22000
3003
+ },
3004
+ {
3005
+ "epoch": 0.6354020520892805,
3006
+ "learning_rate": 1.4837614886694428e-05,
3007
+ "loss": 0.5115,
3008
+ "step": 22050
3009
+ },
3010
+ {
3011
+ "epoch": 0.6368428730690747,
3012
+ "learning_rate": 1.4733732320733559e-05,
3013
+ "loss": 0.4914,
3014
+ "step": 22100
3015
+ },
3016
+ {
3017
+ "epoch": 0.6382836940488691,
3018
+ "learning_rate": 1.4630062538235043e-05,
3019
+ "loss": 0.4961,
3020
+ "step": 22150
3021
+ },
3022
+ {
3023
+ "epoch": 0.6397245150286633,
3024
+ "learning_rate": 1.452660768790725e-05,
3025
+ "loss": 0.4938,
3026
+ "step": 22200
3027
+ },
3028
+ {
3029
+ "epoch": 0.6411653360084576,
3030
+ "learning_rate": 1.4423369914003748e-05,
3031
+ "loss": 0.4941,
3032
+ "step": 22250
3033
+ },
3034
+ {
3035
+ "epoch": 0.6426061569882519,
3036
+ "learning_rate": 1.4320351356278905e-05,
3037
+ "loss": 0.4951,
3038
+ "step": 22300
3039
+ },
3040
+ {
3041
+ "epoch": 0.6440469779680462,
3042
+ "learning_rate": 1.4217554149943504e-05,
3043
+ "loss": 0.5041,
3044
+ "step": 22350
3045
+ },
3046
+ {
3047
+ "epoch": 0.6454877989478405,
3048
+ "learning_rate": 1.4114980425620474e-05,
3049
+ "loss": 0.5011,
3050
+ "step": 22400
3051
+ },
3052
+ {
3053
+ "epoch": 0.6469286199276347,
3054
+ "learning_rate": 1.4012632309300789e-05,
3055
+ "loss": 0.4811,
3056
+ "step": 22450
3057
+ },
3058
+ {
3059
+ "epoch": 0.6483694409074291,
3060
+ "learning_rate": 1.3910511922299321e-05,
3061
+ "loss": 0.4893,
3062
+ "step": 22500
3063
+ },
3064
+ {
3065
+ "epoch": 0.6483694409074291,
3066
+ "eval_loss": 0.5825563073158264,
3067
+ "eval_runtime": 77.7579,
3068
+ "eval_samples_per_second": 25.721,
3069
+ "eval_steps_per_second": 12.86,
3070
+ "step": 22500
3071
+ },
3072
+ {
3073
+ "epoch": 0.6498102618872234,
3074
+ "learning_rate": 1.3808621381210967e-05,
3075
+ "loss": 0.4794,
3076
+ "step": 22550
3077
+ },
3078
+ {
3079
+ "epoch": 0.6512510828670176,
3080
+ "learning_rate": 1.370696279786669e-05,
3081
+ "loss": 0.4987,
3082
+ "step": 22600
3083
+ },
3084
+ {
3085
+ "epoch": 0.6526919038468119,
3086
+ "learning_rate": 1.3605538279289798e-05,
3087
+ "loss": 0.4858,
3088
+ "step": 22650
3089
+ },
3090
+ {
3091
+ "epoch": 0.6541327248266062,
3092
+ "learning_rate": 1.3504349927652286e-05,
3093
+ "loss": 0.474,
3094
+ "step": 22700
3095
+ },
3096
+ {
3097
+ "epoch": 0.6555735458064005,
3098
+ "learning_rate": 1.3403399840231203e-05,
3099
+ "loss": 0.4939,
3100
+ "step": 22750
3101
+ },
3102
+ {
3103
+ "epoch": 0.6570143667861947,
3104
+ "learning_rate": 1.330269010936527e-05,
3105
+ "loss": 0.4954,
3106
+ "step": 22800
3107
+ },
3108
+ {
3109
+ "epoch": 0.6584551877659891,
3110
+ "learning_rate": 1.3202222822411416e-05,
3111
+ "loss": 0.4865,
3112
+ "step": 22850
3113
+ },
3114
+ {
3115
+ "epoch": 0.6598960087457834,
3116
+ "learning_rate": 1.3102000061701613e-05,
3117
+ "loss": 0.4943,
3118
+ "step": 22900
3119
+ },
3120
+ {
3121
+ "epoch": 0.6613368297255776,
3122
+ "learning_rate": 1.300202390449963e-05,
3123
+ "loss": 0.486,
3124
+ "step": 22950
3125
+ },
3126
+ {
3127
+ "epoch": 0.6627776507053719,
3128
+ "learning_rate": 1.290229642295802e-05,
3129
+ "loss": 0.4781,
3130
+ "step": 23000
3131
+ },
3132
+ {
3133
+ "epoch": 0.6627776507053719,
3134
+ "eval_loss": 0.5843392610549927,
3135
+ "eval_runtime": 81.2552,
3136
+ "eval_samples_per_second": 24.614,
3137
+ "eval_steps_per_second": 12.307,
3138
+ "step": 23000
3139
+ },
3140
+ {
3141
+ "epoch": 0.6642184716851662,
3142
+ "learning_rate": 1.2802819684075198e-05,
3143
+ "loss": 0.4771,
3144
+ "step": 23050
3145
+ },
3146
+ {
3147
+ "epoch": 0.6656592926649605,
3148
+ "learning_rate": 1.270359574965253e-05,
3149
+ "loss": 0.4698,
3150
+ "step": 23100
3151
+ },
3152
+ {
3153
+ "epoch": 0.6671001136447547,
3154
+ "learning_rate": 1.2604626676251668e-05,
3155
+ "loss": 0.4824,
3156
+ "step": 23150
3157
+ },
3158
+ {
3159
+ "epoch": 0.6685409346245491,
3160
+ "learning_rate": 1.2505914515151898e-05,
3161
+ "loss": 0.4739,
3162
+ "step": 23200
3163
+ },
3164
+ {
3165
+ "epoch": 0.6699817556043434,
3166
+ "learning_rate": 1.2407461312307583e-05,
3167
+ "loss": 0.489,
3168
+ "step": 23250
3169
+ },
3170
+ {
3171
+ "epoch": 0.6714225765841376,
3172
+ "learning_rate": 1.2309269108305843e-05,
3173
+ "loss": 0.4891,
3174
+ "step": 23300
3175
+ },
3176
+ {
3177
+ "epoch": 0.6728633975639319,
3178
+ "learning_rate": 1.2211339938324179e-05,
3179
+ "loss": 0.5021,
3180
+ "step": 23350
3181
+ },
3182
+ {
3183
+ "epoch": 0.6743042185437262,
3184
+ "learning_rate": 1.211367583208835e-05,
3185
+ "loss": 0.4603,
3186
+ "step": 23400
3187
+ },
3188
+ {
3189
+ "epoch": 0.6757450395235205,
3190
+ "learning_rate": 1.201627881383027e-05,
3191
+ "loss": 0.4795,
3192
+ "step": 23450
3193
+ },
3194
+ {
3195
+ "epoch": 0.6771858605033148,
3196
+ "learning_rate": 1.1919150902246045e-05,
3197
+ "loss": 0.4789,
3198
+ "step": 23500
3199
+ },
3200
+ {
3201
+ "epoch": 0.6771858605033148,
3202
+ "eval_loss": 0.5845726728439331,
3203
+ "eval_runtime": 82.5914,
3204
+ "eval_samples_per_second": 24.216,
3205
+ "eval_steps_per_second": 12.108,
3206
+ "step": 23500
3207
+ },
3208
+ {
3209
+ "epoch": 0.6786266814831091,
3210
+ "learning_rate": 1.1822294110454193e-05,
3211
+ "loss": 0.4852,
3212
+ "step": 23550
3213
+ },
3214
+ {
3215
+ "epoch": 0.6800675024629034,
3216
+ "learning_rate": 1.172571044595383e-05,
3217
+ "loss": 0.4997,
3218
+ "step": 23600
3219
+ },
3220
+ {
3221
+ "epoch": 0.6815083234426976,
3222
+ "learning_rate": 1.1629401910583146e-05,
3223
+ "loss": 0.4864,
3224
+ "step": 23650
3225
+ },
3226
+ {
3227
+ "epoch": 0.6829491444224919,
3228
+ "learning_rate": 1.1533370500477845e-05,
3229
+ "loss": 0.4797,
3230
+ "step": 23700
3231
+ },
3232
+ {
3233
+ "epoch": 0.6843899654022862,
3234
+ "learning_rate": 1.1437618206029832e-05,
3235
+ "loss": 0.4843,
3236
+ "step": 23750
3237
+ },
3238
+ {
3239
+ "epoch": 0.6858307863820805,
3240
+ "learning_rate": 1.1342147011845911e-05,
3241
+ "loss": 0.4783,
3242
+ "step": 23800
3243
+ },
3244
+ {
3245
+ "epoch": 0.6872716073618748,
3246
+ "learning_rate": 1.124695889670666e-05,
3247
+ "loss": 0.4826,
3248
+ "step": 23850
3249
+ },
3250
+ {
3251
+ "epoch": 0.6887124283416691,
3252
+ "learning_rate": 1.1152055833525463e-05,
3253
+ "loss": 0.4618,
3254
+ "step": 23900
3255
+ },
3256
+ {
3257
+ "epoch": 0.6901532493214634,
3258
+ "learning_rate": 1.1057439789307566e-05,
3259
+ "loss": 0.4733,
3260
+ "step": 23950
3261
+ },
3262
+ {
3263
+ "epoch": 0.6915940703012576,
3264
+ "learning_rate": 1.0963112725109312e-05,
3265
+ "loss": 0.4811,
3266
+ "step": 24000
3267
+ },
3268
+ {
3269
+ "epoch": 0.6915940703012576,
3270
+ "eval_loss": 0.5840329527854919,
3271
+ "eval_runtime": 79.7677,
3272
+ "eval_samples_per_second": 25.073,
3273
+ "eval_steps_per_second": 12.536,
3274
+ "step": 24000
3275
+ },
3276
+ {
3277
+ "epoch": 0.6930348912810519,
3278
+ "learning_rate": 1.0869076595997535e-05,
3279
+ "loss": 0.4812,
3280
+ "step": 24050
3281
+ },
3282
+ {
3283
+ "epoch": 0.6944757122608463,
3284
+ "learning_rate": 1.0775333351008985e-05,
3285
+ "loss": 0.4989,
3286
+ "step": 24100
3287
+ },
3288
+ {
3289
+ "epoch": 0.6959165332406405,
3290
+ "learning_rate": 1.068188493310999e-05,
3291
+ "loss": 0.485,
3292
+ "step": 24150
3293
+ },
3294
+ {
3295
+ "epoch": 0.6973573542204348,
3296
+ "learning_rate": 1.0588733279156118e-05,
3297
+ "loss": 0.481,
3298
+ "step": 24200
3299
+ },
3300
+ {
3301
+ "epoch": 0.6987981752002291,
3302
+ "learning_rate": 1.0495880319852105e-05,
3303
+ "loss": 0.4792,
3304
+ "step": 24250
3305
+ },
3306
+ {
3307
+ "epoch": 0.7002389961800234,
3308
+ "learning_rate": 1.040332797971178e-05,
3309
+ "loss": 0.4842,
3310
+ "step": 24300
3311
+ },
3312
+ {
3313
+ "epoch": 0.7016798171598176,
3314
+ "learning_rate": 1.031107817701819e-05,
3315
+ "loss": 0.4864,
3316
+ "step": 24350
3317
+ },
3318
+ {
3319
+ "epoch": 0.7031206381396119,
3320
+ "learning_rate": 1.0219132823783883e-05,
3321
+ "loss": 0.4731,
3322
+ "step": 24400
3323
+ },
3324
+ {
3325
+ "epoch": 0.7045614591194063,
3326
+ "learning_rate": 1.0127493825711207e-05,
3327
+ "loss": 0.4714,
3328
+ "step": 24450
3329
+ },
3330
+ {
3331
+ "epoch": 0.7060022800992005,
3332
+ "learning_rate": 1.0036163082152886e-05,
3333
+ "loss": 0.4873,
3334
+ "step": 24500
3335
+ },
3336
+ {
3337
+ "epoch": 0.7060022800992005,
3338
+ "eval_loss": 0.5816783905029297,
3339
+ "eval_runtime": 76.778,
3340
+ "eval_samples_per_second": 26.049,
3341
+ "eval_steps_per_second": 13.025,
3342
+ "step": 24500
3343
+ },
3344
+ {
3345
+ "epoch": 0.7074431010789948,
3346
+ "learning_rate": 9.945142486072581e-06,
3347
+ "loss": 0.4673,
3348
+ "step": 24550
3349
+ },
3350
+ {
3351
+ "epoch": 0.7088839220587891,
3352
+ "learning_rate": 9.854433924005727e-06,
3353
+ "loss": 0.468,
3354
+ "step": 24600
3355
+ },
3356
+ {
3357
+ "epoch": 0.7103247430385834,
3358
+ "learning_rate": 9.764039276020367e-06,
3359
+ "loss": 0.4798,
3360
+ "step": 24650
3361
+ },
3362
+ {
3363
+ "epoch": 0.7117655640183776,
3364
+ "learning_rate": 9.67396041567821e-06,
3365
+ "loss": 0.4652,
3366
+ "step": 24700
3367
+ },
3368
+ {
3369
+ "epoch": 0.7132063849981719,
3370
+ "learning_rate": 9.584199209995855e-06,
3371
+ "loss": 0.4787,
3372
+ "step": 24750
3373
+ },
3374
+ {
3375
+ "epoch": 0.7146472059779663,
3376
+ "learning_rate": 9.494757519405987e-06,
3377
+ "loss": 0.4761,
3378
+ "step": 24800
3379
+ },
3380
+ {
3381
+ "epoch": 0.7160880269577605,
3382
+ "learning_rate": 9.405637197718877e-06,
3383
+ "loss": 0.4642,
3384
+ "step": 24850
3385
+ },
3386
+ {
3387
+ "epoch": 0.7175288479375548,
3388
+ "learning_rate": 9.316840092083989e-06,
3389
+ "loss": 0.4811,
3390
+ "step": 24900
3391
+ },
3392
+ {
3393
+ "epoch": 0.7189696689173491,
3394
+ "learning_rate": 9.228368042951622e-06,
3395
+ "loss": 0.4819,
3396
+ "step": 24950
3397
+ },
3398
+ {
3399
+ "epoch": 0.7204104898971434,
3400
+ "learning_rate": 9.140222884034827e-06,
3401
+ "loss": 0.4766,
3402
+ "step": 25000
3403
+ },
3404
+ {
3405
+ "epoch": 0.7204104898971434,
3406
+ "eval_loss": 0.5827475786209106,
3407
+ "eval_runtime": 76.6955,
3408
+ "eval_samples_per_second": 26.077,
3409
+ "eval_steps_per_second": 13.039,
3410
+ "step": 25000
3411
+ }
3412
+ ],
3413
+ "logging_steps": 50,
3414
+ "max_steps": 34703,
3415
+ "num_input_tokens_seen": 0,
3416
+ "num_train_epochs": 1,
3417
+ "save_steps": 1000,
3418
+ "stateful_callbacks": {
3419
+ "TrainerControl": {
3420
+ "args": {
3421
+ "should_epoch_stop": false,
3422
+ "should_evaluate": false,
3423
+ "should_log": false,
3424
+ "should_save": true,
3425
+ "should_training_stop": false
3426
+ },
3427
+ "attributes": {}
3428
+ }
3429
+ },
3430
+ "total_flos": 7.542329573376e+17,
3431
+ "train_batch_size": 1,
3432
+ "trial_name": null,
3433
+ "trial_params": null
3434
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3b6f8916837d65a8e78a50633145c2eb04bef144cf5989cecb0e64af2747acf5
3
+ size 5649