{
    "i2v": true,
    "use_audio": true,
    "random_prefix_frames": true,
    "sp_size": 1,
    "text_encoder_path": "/tmp/pretrained_models/Wan2.1-T2V-14B/models_t5_umt5-xxl-enc-bf16.pth",
    "image_encoder_path": "None",
    "dit_path": "/tmp/pretrained_models/Wan2.1-T2V-14B/diffusion_pytorch_model-00001-of-00006.safetensors,/tmp/pretrained_models/Wan2.1-T2V-14B/diffusion_pytorch_model-00002-of-00006.safetensors,/tmp/pretrained_models/Wan2.1-T2V-14B/diffusion_pytorch_model-00003-of-00006.safetensors,/tmp/pretrained_models/Wan2.1-T2V-14B/diffusion_pytorch_model-00004-of-00006.safetensors,/tmp/pretrained_models/Wan2.1-T2V-14B/diffusion_pytorch_model-00005-of-00006.safetensors,/tmp/pretrained_models/Wan2.1-T2V-14B/diffusion_pytorch_model-00006-of-00006.safetensors",
    "model_config": {
        "in_dim": 33,
        "audio_hidden_size": 32
    },
    "train_architecture": "lora",
    "lora_target_modules": "q,k,v,o,ffn.0,ffn.2",
    "init_lora_weights": "kaiming",
    "lora_rank": 128,
    "lora_alpha": 64.0
}
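
For reference, a minimal Python sketch of how a training script might consume this file. The filename `train_config.json` is illustrative, not taken from the source; the sketch only reads fields defined above. Note that `dit_path` packs the six safetensors shards into one comma-separated string, so it must be split before loading, and the effective LoRA scaling follows the usual alpha / rank convention (64.0 / 128 = 0.5 here).

```python
import json

# Load the training config (filename is illustrative).
with open("train_config.json") as f:
    cfg = json.load(f)

# dit_path is a single comma-separated string; split it into the
# individual safetensors shard paths before loading weights.
dit_shards = cfg["dit_path"].split(",")
print(f"{len(dit_shards)} DiT shards")  # expected: 6

# Standard LoRA convention: updates are scaled by alpha / rank.
scaling = cfg["lora_alpha"] / cfg["lora_rank"]
print(f"LoRA rank={cfg['lora_rank']}, alpha={cfg['lora_alpha']}, "
      f"scaling={scaling}, targets={cfg['lora_target_modules']}")
```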
|
|
|
|