HectorHe committed (verified)
Commit 6c03a91 · 1 Parent(s): c068810

Training in progress, step 500

chat_template.jinja ADDED
@@ -0,0 +1,5 @@
+ {% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '
+
+ ' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '
+
+ ' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}
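
A minimal sketch of how this template can be exercised once the checkpoint is downloaded (the local path is a placeholder; any checkout of this repo works):

from transformers import AutoTokenizer

# Placeholder path; substitute this repo id or a local checkout.
tok = AutoTokenizer.from_pretrained("./checkpoint-500", trust_remote_code=True)

messages = [
    {"role": "system", "content": "You are a helpful coding assistant."},
    {"role": "user", "content": "Write hello world in Python."},
]

# add_generation_prompt=True appends the trailing 'Assistant:' cue
# from the template's final {% if %} block.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)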
config.json ADDED
@@ -0,0 +1,60 @@
+ {
+   "architectures": [
+     "DeepseekV2ForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "auto_map": {
+     "AutoConfig": "configuration_deepseek.DeepseekV2Config",
+     "AutoModel": "modeling_deepseek.DeepseekV2Model",
+     "AutoModelForCausalLM": "modeling_deepseek.DeepseekV2ForCausalLM"
+   },
+   "aux_loss_alpha": 0.001,
+   "bos_token_id": 100000,
+   "eos_token_id": 100001,
+   "ep_size": 1,
+   "first_k_dense_replace": 1,
+   "hidden_act": "silu",
+   "hidden_size": 2048,
+   "initializer_range": 0.02,
+   "intermediate_size": 10944,
+   "kv_lora_rank": 512,
+   "max_position_embeddings": 163840,
+   "model_type": "deepseek_v2",
+   "moe_intermediate_size": 1408,
+   "moe_layer_freq": 1,
+   "n_group": 1,
+   "n_routed_experts": 64,
+   "n_shared_experts": 2,
+   "norm_topk_prob": false,
+   "num_attention_heads": 16,
+   "num_experts_per_tok": 6,
+   "num_hidden_layers": 27,
+   "num_key_value_heads": 16,
+   "pretraining_tp": 1,
+   "q_lora_rank": null,
+   "qk_nope_head_dim": 128,
+   "qk_rope_head_dim": 64,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": {
+     "beta_fast": 32,
+     "beta_slow": 1,
+     "factor": 40,
+     "mscale": 0.707,
+     "mscale_all_dim": 0.707,
+     "original_max_position_embeddings": 4096,
+     "type": "yarn"
+   },
+   "rope_theta": 10000,
+   "routed_scaling_factor": 1.0,
+   "scoring_func": "softmax",
+   "seq_aux": true,
+   "tie_word_embeddings": false,
+   "topk_group": 1,
+   "topk_method": "greedy",
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.55.0",
+   "use_cache": false,
+   "v_head_dim": 128,
+   "vocab_size": 102400
+ }
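
Since the config routes through "auto_map" to custom modeling code, loading requires trust_remote_code=True. A minimal loading sketch (the path is a placeholder):

import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "./checkpoint-500",           # placeholder; use this repo id or a checkout
    torch_dtype=torch.bfloat16,   # matches "torch_dtype": "bfloat16"
    trust_remote_code=True,       # resolves auto_map -> modeling_deepseek.py
    device_map="auto",
)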
model-00001-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:21e6b9f069020314264b7e84a4bb4ebdd8a54087e3b4a52ec4d39d818e155374
+ size 4994763632
model-00002-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe16ce35379361862e14736824ad343d20ebe2958f1fb13205eaa70321383e1d
+ size 4995044944
model-00003-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:30d2a3ac66de7a5ac1b38de9cb6ecad7bdebf10de09966d97a0a0a39e4c0aafb
+ size 4996085000
model-00004-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2f2e9b7a350fff26923606e882be74625a89729b33ff22d0df8b4701b79cfbe
+ size 4996085224
model-00005-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c570016670bbbed81419a669f35dc2b28b31ea0bc5d6daa8a358bfa0afce3e6c
+ size 4996085224
model-00006-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a478330772cb082a451c430c25b4012433a07cd3f06bf3c9faef58512b7d240c
+ size 4995045792
model-00007-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:25273664f3d3f3e6900396b2ede81b8f334ab268292e39a813b2a2a3a4ddeaaa
+ size 1440515736
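
These are git-lfs pointer files rather than the weights themselves; the tensors are fetched by LFS on clone or download. A quick sanity check on the declared shard sizes (a sketch, ignoring the small safetensors header overhead):

sizes = [
    4994763632, 4995044944, 4996085000, 4996085224,
    4996085224, 4995045792, 1440515736,
]
total = sum(sizes)   # 31,413,625,552 bytes, roughly 29.3 GiB
params = total / 2   # bf16 stores 2 bytes per parameter
print(f"~{params / 1e9:.1f}B parameters")  # ~15.7B, consistent with V2-Lite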
model.safetensors.index.json ADDED
The diff for this file is too large to render.
special_tokens_map.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "bos_token": {
+     "content": "<|begin▁of▁sentence|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|end▁of▁sentence|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<|end▁of▁sentence|>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render.
tokenizer_config.json ADDED
@@ -0,0 +1,162 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": null,
+   "added_tokens_decoder": {
+     "100000": {
+       "content": "<|begin▁of▁sentence|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100001": {
+       "content": "<|end▁of▁sentence|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100002": {
+       "content": "<|fim▁hole|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100003": {
+       "content": "<|fim▁begin|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100004": {
+       "content": "<|fim▁end|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100005": {
+       "content": "<|completion|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100006": {
+       "content": "<|User|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100007": {
+       "content": "<|Assistant|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100008": {
+       "content": "<|EOT|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100009": {
+       "content": "<|tool▁calls▁begin|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100010": {
+       "content": "<|tool▁calls▁end|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100011": {
+       "content": "<|tool▁call▁begin|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100012": {
+       "content": "<|tool▁call▁end|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100013": {
+       "content": "<|tool▁outputs▁begin|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100014": {
+       "content": "<|tool▁outputs▁end|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100015": {
+       "content": "<|tool▁output▁begin|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100016": {
+       "content": "<|tool▁output▁end|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100017": {
+       "content": "<|tool▁sep|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "bos_token": "<|begin▁of▁sentence|>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|end▁of▁sentence|>",
+   "extra_special_tokens": {},
+   "legacy": true,
+   "model_max_length": 16384,
+   "pad_token": "<|end▁of▁sentence|>",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "LlamaTokenizerFast",
+   "unk_token": null,
+   "use_default_system_prompt": false
+ }
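
A quick consistency check between this tokenizer and config.json (a sketch; the path is a placeholder): the ids of the BOS/EOS tokens registered here should line up with bos_token_id/eos_token_id above.

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./checkpoint-500", trust_remote_code=True)

assert tok.bos_token_id == 100000            # "<|begin▁of▁sentence|>"
assert tok.eos_token_id == 100001            # "<|end▁of▁sentence|>"
assert tok.pad_token_id == tok.eos_token_id  # pad_token reuses EOS
print(tok.model_max_length)                  # 16384 per tokenizer_config.json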
training.log ADDED
@@ -0,0 +1,218 @@
+ 2025-08-13 08:32:28 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, use_dora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False)
+ 2025-08-13 08:32:28 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='autoprogrammer/nemotron_code_lf_filtered', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False)
+ 2025-08-13 08:32:28 - INFO - __main__ - Training parameters SFTConfig(
+ _n_gpu=1,
+ accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
+ adafactor=False,
+ adam_beta1=0.9,
+ adam_beta2=0.999,
+ adam_epsilon=1e-08,
+ auto_find_batch_size=False,
+ average_tokens_across_devices=True,
+ batch_eval_metrics=False,
+ benchmarks=[],
+ bf16=True,
+ bf16_full_eval=False,
+ callbacks=[],
+ chars_per_token=<CHARS_PER_TOKEN>,
+ chat_template=None,
+ completion_only_loss=None,
+ data_seed=None,
+ dataloader_drop_last=False,
+ dataloader_num_workers=0,
+ dataloader_persistent_workers=False,
+ dataloader_pin_memory=True,
+ dataloader_prefetch_factor=None,
+ dataset_batch_size=None,
+ dataset_kwargs=None,
+ dataset_num_proc=None,
+ dataset_text_field=text,
+ ddp_backend=None,
+ ddp_broadcast_buffers=None,
+ ddp_bucket_cap_mb=None,
+ ddp_find_unused_parameters=None,
+ ddp_timeout=1800000000,
+ debug=[],
+ deepspeed=None,
+ disable_tqdm=False,
+ do_eval=True,
+ do_predict=False,
+ do_train=False,
+ eos_token=<EOS_TOKEN>,
+ eval_accumulation_steps=None,
+ eval_delay=0,
+ eval_do_concat_batches=True,
+ eval_on_start=False,
+ eval_packing=None,
+ eval_steps=None,
+ eval_strategy=IntervalStrategy.NO,
+ eval_use_gather_object=False,
+ fp16=False,
+ fp16_backend=auto,
+ fp16_full_eval=False,
+ fp16_opt_level=O1,
+ fsdp=[],
+ fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+ fsdp_min_num_params=0,
+ fsdp_transformer_layer_cls_to_wrap=None,
+ full_determinism=False,
+ gradient_accumulation_steps=1,
+ gradient_checkpointing=True,
+ gradient_checkpointing_kwargs={'use_reentrant': False},
+ greater_is_better=None,
+ group_by_length=False,
+ half_precision_backend=auto,
+ hub_always_push=False,
+ hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-sft-nemotron-code,
+ hub_model_revision=main,
+ hub_private_repo=None,
+ hub_revision=None,
+ hub_strategy=HubStrategy.EVERY_SAVE,
+ hub_token=<HUB_TOKEN>,
+ ignore_data_skip=False,
+ include_for_metrics=[],
+ include_inputs_for_metrics=False,
+ include_num_input_tokens_seen=False,
+ include_tokens_per_second=False,
+ jit_mode_eval=False,
+ label_names=None,
+ label_smoothing_factor=0.0,
+ learning_rate=1e-05,
+ length_column_name=length,
+ liger_kernel_config=None,
+ load_best_model_at_end=False,
+ local_rank=0,
+ log_level=info,
+ log_level_replica=warning,
+ log_on_each_node=True,
+ logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/sft/nemotron_code/runs/Aug13_08-32-26_ip-172-31-35-162,
+ logging_first_step=False,
+ logging_nan_inf_filter=True,
+ logging_steps=1,
+ logging_strategy=IntervalStrategy.STEPS,
+ lr_scheduler_kwargs={'min_lr_rate': 0.1},
+ lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR,
+ max_grad_norm=1.0,
+ max_length=8192,
+ max_seq_length=None,
+ max_steps=-1,
+ metric_for_best_model=None,
+ model_init_kwargs=None,
+ mp_parameters=,
+ neftune_noise_alpha=None,
+ no_cuda=False,
+ num_of_sequences=None,
+ num_train_epochs=1,
+ optim=OptimizerNames.ADAMW_TORCH,
+ optim_args=None,
+ optim_target_modules=None,
+ output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/sft/nemotron_code,
+ overwrite_hub_revision=False,
+ overwrite_output_dir=True,
+ packing=True,
+ pad_to_multiple_of=None,
+ pad_token=<PAD_TOKEN>,
+ padding_free=False,
+ past_index=-1,
+ per_device_eval_batch_size=16,
+ per_device_train_batch_size=1,
+ prediction_loss_only=False,
+ push_to_hub=True,
+ push_to_hub_model_id=None,
+ push_to_hub_organization=None,
+ push_to_hub_revision=False,
+ push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ ray_scope=last,
+ remove_unused_columns=True,
+ report_to=['wandb'],
+ restore_callback_states_from_checkpoint=False,
+ resume_from_checkpoint=None,
+ run_name=None,
+ save_on_each_node=False,
+ save_only_model=False,
+ save_safetensors=True,
+ save_steps=500,
+ save_strategy=SaveStrategy.STEPS,
+ save_total_limit=1,
+ seed=1234,
+ skip_memory_metrics=True,
+ system_prompt=None,
+ tf32=None,
+ torch_compile=False,
+ torch_compile_backend=None,
+ torch_compile_mode=None,
+ torch_empty_cache_steps=None,
+ torchdynamo=None,
+ tpu_metrics_debug=False,
+ tpu_num_cores=None,
+ use_cpu=False,
+ use_ipex=False,
+ use_legacy_prediction_loop=False,
+ use_liger=False,
+ use_liger_kernel=False,
+ use_mps_device=False,
+ wandb_entity=None,
+ wandb_project=None,
+ wandb_run_group=None,
+ warmup_ratio=0.1,
+ warmup_steps=0,
+ weight_decay=0.0,
+ )
+ 2025-08-13 08:32:29 - INFO - __main__ - *** Initializing model kwargs ***
+ 2025-08-13 09:14:29 - INFO - __main__ - *** Train ***
+ 2025-08-13 09:14:29 - INFO - __main__ - DeepseekV2ForCausalLM(
+   (model): DeepseekV2Model(
+     (embed_tokens): Embedding(102400, 2048)
+     (layers): ModuleList(
+       (0): DeepseekV2DecoderLayer(
+         (self_attn): DeepseekV2FlashAttention2(
+           (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
+           (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
+           (kv_a_layernorm): DeepseekV2RMSNorm()
+           (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
+           (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
+           (rotary_emb): DeepseekV2YarnRotaryEmbedding()
+         )
+         (mlp): DeepseekV2MLP(
+           (gate_proj): Linear(in_features=2048, out_features=10944, bias=False)
+           (up_proj): Linear(in_features=2048, out_features=10944, bias=False)
+           (down_proj): Linear(in_features=10944, out_features=2048, bias=False)
+           (act_fn): SiLU()
+         )
+         (input_layernorm): DeepseekV2RMSNorm()
+         (post_attention_layernorm): DeepseekV2RMSNorm()
+       )
+       (1-26): 26 x DeepseekV2DecoderLayer(
+         (self_attn): DeepseekV2FlashAttention2(
+           (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
+           (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
+           (kv_a_layernorm): DeepseekV2RMSNorm()
+           (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
+           (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
+           (rotary_emb): DeepseekV2YarnRotaryEmbedding()
+         )
+         (mlp): DeepseekV2MoE(
+           (experts): ModuleList(
+             (0-63): 64 x DeepseekV2MLP(
+               (gate_proj): Linear(in_features=2048, out_features=1408, bias=False)
+               (up_proj): Linear(in_features=2048, out_features=1408, bias=False)
+               (down_proj): Linear(in_features=1408, out_features=2048, bias=False)
+               (act_fn): SiLU()
+             )
+           )
+           (gate): MoEGate()
+           (shared_experts): DeepseekV2MLP(
+             (gate_proj): Linear(in_features=2048, out_features=2816, bias=False)
+             (up_proj): Linear(in_features=2048, out_features=2816, bias=False)
+             (down_proj): Linear(in_features=2816, out_features=2048, bias=False)
+             (act_fn): SiLU()
+           )
+         )
+         (input_layernorm): DeepseekV2RMSNorm()
+         (post_attention_layernorm): DeepseekV2RMSNorm()
+       )
+     )
+     (norm): DeepseekV2RMSNorm()
+   )
+   (lm_head): Linear(in_features=2048, out_features=102400, bias=False)
+ )
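
The log above pins the whole recipe. For orientation, a minimal trl sketch that mirrors the key logged hyperparameters (a reconstruction, not the author's actual script; only values visible in the log are used, everything else is left at defaults):

from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

dataset = load_dataset("autoprogrammer/nemotron_code_lf_filtered", split="train")

config = SFTConfig(
    output_dir="data/DeepSeek-Coder-V2-Lite-Instruct/sft/nemotron_code",
    num_train_epochs=1,
    per_device_train_batch_size=1,
    gradient_accumulation_steps=1,
    learning_rate=1e-5,
    lr_scheduler_type="cosine_with_min_lr",
    lr_scheduler_kwargs={"min_lr_rate": 0.1},
    warmup_ratio=0.1,
    max_length=8192,
    packing=True,
    bf16=True,
    gradient_checkpointing=True,
    gradient_checkpointing_kwargs={"use_reentrant": False},
    logging_steps=1,
    save_steps=500,                  # hence this "step 500" push
    save_total_limit=1,
    seed=1234,
    push_to_hub=True,
    report_to=["wandb"],
    model_init_kwargs={              # mirrors the logged ModelConfig
        "torch_dtype": "bfloat16",
        "trust_remote_code": True,
        "attn_implementation": "flash_attention_2",
    },
)

trainer = SFTTrainer(
    model="deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct",
    args=config,
    train_dataset=dataset,
)
trainer.train()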
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ff576f8162e1a2a3a7d78df03d3a799feb5a9463dceeb2565ebf309d4c6fe8a
+ size 7736
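
training_args.bin is the pickled SFTConfig saved by the Trainer and can be inspected directly (a sketch; weights_only=False is required because it is a pickled Python object, so only load it from a trusted source):

import torch

# Pickled SFTConfig/TrainingArguments object; trusted source only.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate)   # 1e-05
print(args.save_steps)      # 500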