HighCWu committed on
Commit
c1b001f
·
verified ·
1 Parent(s): 6be73e9

Update train_lora_flux_kontext_16gb_jojo_v2.yaml

Browse files
train_lora_flux_kontext_16gb_jojo_v2.yaml CHANGED
@@ -72,7 +72,7 @@ config:
72
  latent_fixed_size: 64 # `64=512/8` means it uses the same amount of vram as a 512x512 image at any resolution.
73
  model:
74
  # huggingface model name or path.
75
- name_or_path: "/HighCWu/FLUX.1-Kontext-dev-bnb-hqq-4bit"
76
  arch: "flux_kontext"
77
  quantize: true # We've already used a quantized model
78
  quantize_te: true
 
72
  latent_fixed_size: 64 # `64=512/8` means it uses the same amount of vram as a 512x512 image at any resolution.
73
  model:
74
  # huggingface model name or path.
75
+ name_or_path: "HighCWu/FLUX.1-Kontext-dev-bnb-hqq-4bit"
76
  arch: "flux_kontext"
77
  quantize: true # We've already used a quantized model
78
  quantize_te: true