2025-06-30 08:18:54 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, use_dora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False)
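These ModelConfig fields map directly onto standard `transformers` loading kwargs (the PEFT/LoRA and bitsandbytes fields are inert here, since use_peft=False and both load_in_8bit/load_in_4bit are False). A minimal sketch of how such a config is typically materialized; the helper name `load_model` is hypothetical and the script's actual loading code is not shown in this log:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

def load_model():
    # Hypothetical helper mirroring the logged ModelConfig.
    model_id = "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct"
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        revision="main",                          # model_revision='main'
        torch_dtype=torch.bfloat16,               # torch_dtype='bfloat16'
        trust_remote_code=True,                   # DeepseekV2* classes ship with the checkpoint
        attn_implementation="flash_attention_2",  # attn_implementation
    )
    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
    return model, tokenizer
```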
2025-06-30 08:18:54 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='lmms-lab/Math10K', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False)
2025-06-30 08:18:54 - INFO - __main__ - Training parameters EfficientDistillationConfig(
_n_gpu=1,
accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
adafactor=False,
adam_beta1=0.9,
adam_beta2=0.999,
adam_epsilon=1e-08,
alpha=0.5,
auto_find_batch_size=False,
average_tokens_across_devices=False,
batch_eval_metrics=False,
benchmarks=[],
bf16=True,
bf16_full_eval=False,
callbacks=[],
ce_loss_scale=3,
chars_per_token=,
chat_template=None,
completion_only_loss=None,
data_seed=None,
dataloader_drop_last=False,
dataloader_num_workers=0,
dataloader_persistent_workers=False,
dataloader_pin_memory=True,
dataloader_prefetch_factor=None,
dataset_batch_size=None,
dataset_kwargs=None,
dataset_num_proc=None,
dataset_text_field=text,
ddp_backend=None,
ddp_broadcast_buffers=None,
ddp_bucket_cap_mb=None,
ddp_find_unused_parameters=None,
ddp_timeout=1800000000,
debug=[],
deepspeed=None,
disable_dropout=True,
disable_tqdm=False,
do_eval=True,
do_predict=False,
do_train=False,
eos_token=,
eval_accumulation_steps=None,
eval_delay=0,
eval_do_concat_batches=True,
eval_on_start=False,
eval_packing=None,
eval_steps=None,
eval_strategy=IntervalStrategy.NO,
eval_use_gather_object=False,
expert_num=6,
fp16=False,
fp16_backend=auto,
fp16_full_eval=False,
fp16_opt_level=O1,
fsdp=[],
fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
fsdp_min_num_params=0,
fsdp_transformer_layer_cls_to_wrap=None,
full_determinism=False,
gradient_accumulation_steps=1,
gradient_checkpointing=False,
gradient_checkpointing_kwargs={'use_reentrant': False},
greater_is_better=None,
group_by_length=False,
half_precision_backend=auto,
hub_always_push=False,
hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Math10K-Distill-6-experts-token-specific-3-scaled,
hub_model_revision=main,
hub_private_repo=None,
hub_strategy=HubStrategy.EVERY_SAVE,
hub_token=,
ignore_data_skip=False,
include_for_metrics=[],
include_inputs_for_metrics=False,
include_num_input_tokens_seen=False,
include_tokens_per_second=False,
jit_mode_eval=False,
kl_loss_scale=1.0,
label_names=None,
label_smoothing_factor=0.0,
learning_rate=1e-05,
length_column_name=length,
lmbda=0.0,
load_best_model_at_end=False,
local_rank=0,
log_level=info,
log_level_replica=warning,
log_on_each_node=True,
logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_token_specific_3_epoch_scaled_3times/runs/Jun30_08-18-54_ip-172-31-78-111,
logging_first_step=False,
logging_nan_inf_filter=True,
logging_steps=1,
logging_strategy=IntervalStrategy.STEPS,
loss_type=token_specific,
lr_scheduler_kwargs={'min_lr_rate': 0.1},
lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR,
max_grad_norm=1.0,
max_length=8192,
max_new_tokens=600,
max_seq_length=None,
max_steps=-1,
metric_for_best_model=None,
model_init_kwargs=None,
mp_parameters=,
neftune_noise_alpha=None,
no_cuda=False,
num_of_sequences=None,
num_train_epochs=3,
optim=OptimizerNames.ADAMW_TORCH,
optim_args=None,
optim_target_modules=None,
output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_token_specific_3_epoch_scaled_3times,
overwrite_hub_revision=False,
overwrite_output_dir=True,
packing=False,
pad_to_multiple_of=None,
pad_token=,
padding_free=False,
past_index=-1,
per_device_eval_batch_size=16,
per_device_train_batch_size=4,
prediction_loss_only=False,
push_to_hub=True,
push_to_hub_model_id=None,
push_to_hub_organization=None,
push_to_hub_revision=False,
push_to_hub_token=,
ray_scope=last,
reduction=sum,
remove_unused_columns=True,
report_to=['wandb'],
restore_callback_states_from_checkpoint=False,
resume_from_checkpoint=None,
run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_token_specific_3_epoch_scaled_3times,
save_on_each_node=False,
save_only_model=False,
save_safetensors=True,
save_steps=100,
save_strategy=SaveStrategy.STEPS,
save_total_limit=1,
seed=1234,
skip_memory_metrics=True,
system_prompt=None,
teacher_model_init_kwargs=None,
teacher_model_name_or_path=None,
temperature=0.9,
tf32=None,
torch_compile=False,
torch_compile_backend=None,
torch_compile_mode=None,
torch_empty_cache_steps=None,
torchdynamo=None,
tpu_metrics_debug=False,
tpu_num_cores=None,
use_cpu=False,
use_ipex=False,
use_legacy_prediction_loop=False,
use_liger=False,
use_liger_kernel=False,
use_mps_device=False,
wandb_entity=None,
wandb_project=None,
warmup_ratio=0.1,
warmup_steps=0,
weight_decay=0.0,
)
2025-06-30 08:18:55 - INFO - __main__ - *** Initializing model kwargs ***
2025-06-30 08:18:55 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_token_specific_3_epoch_scaled_3times/top_6_experts_lmms-lab_Math10K.json: {'model.layers.1.mlp': [51, 61, 44, 45, 14, 22], 'model.layers.2.mlp': [27, 25, 18, 13, 3, 23], 'model.layers.3.mlp': [54, 25, 41, 23, 28, 57], 'model.layers.4.mlp': [37, 21, 33, 49, 11, 14], 'model.layers.5.mlp': [54, 47, 35, 20, 52, 9], 'model.layers.6.mlp': [22, 1, 13, 45, 42, 47], 'model.layers.7.mlp': [58, 43, 24, 18, 44, 62], 'model.layers.8.mlp': [47, 39, 56, 30, 54, 58], 'model.layers.9.mlp': [31, 13, 22, 24, 12, 32], 'model.layers.10.mlp': [47, 19, 42, 2, 13, 22], 'model.layers.11.mlp': [29, 11, 17, 10, 59, 22], 'model.layers.12.mlp': [5, 56, 3, 59, 4, 26], 'model.layers.13.mlp': [10, 42, 58, 14, 47, 17], 'model.layers.14.mlp': [51, 7, 27, 18, 31, 61], 'model.layers.15.mlp': [24, 55, 5, 17, 14, 41], 'model.layers.16.mlp': [61, 33, 63, 49, 19, 9], 'model.layers.17.mlp': [0, 26, 43, 32, 27, 29], 'model.layers.18.mlp': [5, 56, 42, 36, 2, 1], 'model.layers.19.mlp': [2, 23, 24, 36, 40, 0], 'model.layers.20.mlp': [1, 56, 38, 20, 48, 58], 'model.layers.21.mlp': [5, 13, 15, 28, 19, 10], 'model.layers.22.mlp': [58, 32, 31, 3, 45, 14], 'model.layers.23.mlp': [20, 0, 58, 45, 33, 42], 'model.layers.24.mlp': [62, 7, 42, 47, 10, 63], 'model.layers.25.mlp': [45, 48, 39, 11, 46, 38], 'model.layers.26.mlp': [46, 49, 6, 13, 11, 57]}
2025-06-30 08:18:55 - INFO - __main__ - Model memory before loading model: Memory allocated: 0.0 Memory reserved: 0.0
2025-06-30 08:19:06 - INFO - __main__ - Model memory after loading model: Memory allocated: 0.0 Memory reserved: 0.0
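Several of the config fields above (alpha=0.5, ce_loss_scale=3, kl_loss_scale=1.0, temperature=0.9, reduction=sum) parameterize the distillation objective. The sketch below is one plausible reading of how such scales combine a hard-label cross-entropy term with a teacher KL term; it is not the trainer's actual code. In particular, loss_type=token_specific implies a per-token weighting scheme the log does not reveal (uniform weighting is shown instead), and with teacher_model_name_or_path=None the teacher is presumably the unpruned MoE model, though the log does not say:

```python
import torch.nn.functional as F

def distillation_loss(student_logits, teacher_logits, labels,
                      alpha=0.5, ce_scale=3.0, kl_scale=1.0, temperature=0.9):
    # Hard-label cross-entropy on ground-truth tokens (reduction=sum, as in
    # the config); masking beyond ignore_index is omitted for brevity.
    ce = F.cross_entropy(
        student_logits.view(-1, student_logits.size(-1)),
        labels.view(-1),
        ignore_index=-100,
        reduction="sum",
    )
    # Forward KL from teacher to student on temperature-softened logits,
    # with the usual T^2 correction to keep gradient magnitudes comparable.
    kl = F.kl_div(
        F.log_softmax(student_logits / temperature, dim=-1),
        F.log_softmax(teacher_logits / temperature, dim=-1),
        log_target=True,
        reduction="sum",
    ) * temperature**2
    # How alpha interpolates the two terms is a guess; the real trainer's
    # 'token_specific' loss would weight individual tokens differently.
    return alpha * ce_scale * ce + (1 - alpha) * kl_scale * kl
```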
2025-06-30 08:19:06 - INFO - __main__ - Replacing MoE layers with dense layers using selected experts...
2025-06-30 08:19:19 - INFO - __main__ - MoE layers replaced with Dense MLP layers
2025-06-30 08:19:19 - INFO - __main__ - Model memory after replacing MoE with dense: Memory allocated: 0.0 Memory reserved: 0.0
2025-06-30 08:19:19 - INFO - __main__ - Initializing EfficientDistillationTrainer...
2025-06-30 08:19:45 - INFO - __main__ - Model memory after trainer initialization: Memory allocated: 31126.0048828125 Memory reserved: 36896.0
2025-06-30 08:19:45 - INFO - __main__ - *** Starting training ***
2025-06-30 08:19:45 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM(
  (model): DeepseekV2Model(
    (embed_tokens): Embedding(102400, 2048)
    (layers): ModuleList(
      (0): DeepseekV2DecoderLayer(
        (self_attn): DeepseekV2FlashAttention2(
          (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
          (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
          (kv_a_layernorm): DeepseekV2RMSNorm()
          (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
          (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
          (rotary_emb): DeepseekV2YarnRotaryEmbedding()
        )
        (mlp): DeepseekV2MLP(
          (gate_proj): Linear(in_features=2048, out_features=10944, bias=False)
          (up_proj): Linear(in_features=2048, out_features=10944, bias=False)
          (down_proj): Linear(in_features=10944, out_features=2048, bias=False)
          (act_fn): SiLU()
        )
        (input_layernorm): DeepseekV2RMSNorm()
        (post_attention_layernorm): DeepseekV2RMSNorm()
      )
      (1-26): 26 x DeepseekV2DecoderLayer(
        (self_attn): DeepseekV2FlashAttention2(
          (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
          (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
          (kv_a_layernorm): DeepseekV2RMSNorm()
          (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
          (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
          (rotary_emb): DeepseekV2YarnRotaryEmbedding()
        )
        (mlp): DeepseekV2MoE(
          (gate): MoEGate()
          (shared_experts): DeepseekV2MLP(
            (gate_proj): Linear(in_features=2048, out_features=2816, bias=False)
            (up_proj): Linear(in_features=2048, out_features=2816, bias=False)
            (down_proj): Linear(in_features=2816, out_features=2048, bias=False)
            (act_fn): SiLU()
          )
          (selected_experts): ModuleList(
            (0-5): 6 x DeepseekV2MLP(
              (gate_proj): Linear(in_features=2048, out_features=1408, bias=False)
              (up_proj): Linear(in_features=2048, out_features=1408, bias=False)
              (down_proj): Linear(in_features=1408, out_features=2048, bias=False)
              (act_fn): SiLU()
            )
          )
          (experts): ModuleList()
        )
        (input_layernorm): DeepseekV2RMSNorm()
        (post_attention_layernorm): DeepseekV2RMSNorm()
      )
    )
    (norm): DeepseekV2RMSNorm()
  )
  (lm_head): Linear(in_features=2048, out_features=102400, bias=False)
)
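The architecture dump shows what the replacement step produced in layers 1-26: the original routed `experts` ModuleList is now empty, the six experts picked per layer in the JSON above survive as `selected_experts`, and `shared_experts` is untouched. A rough sketch of that surgery, assuming the checkpoint's DeepseekV2MoE modules and the logged JSON layout; the function name is hypothetical, and the matching change to the module's forward pass (whether the gate is restricted to the survivors or their outputs are densely averaged) is not visible in the log and is omitted:

```python
import json
import torch.nn as nn

def replace_moe_with_selected_experts(model, top_experts_path):
    """Sketch of the expert-pruning surgery implied by the logged architecture."""
    with open(top_experts_path) as f:
        top_experts = json.load(f)  # {'model.layers.1.mlp': [51, 61, ...], ...}

    # Collect targets first so the module tree is not mutated mid-iteration.
    moe_layers = {name: mod for name, mod in model.named_modules()
                  if name in top_experts}
    for name, moe in moe_layers.items():
        keep = top_experts[name]
        # Move the chosen experts into a new ModuleList, matching the
        # `selected_experts` entry in the printed architecture...
        moe.selected_experts = nn.ModuleList(moe.experts[i] for i in keep)
        # ...and drop the remaining routed experts, leaving `experts` empty.
        moe.experts = nn.ModuleList()
    return model
```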