mikaelh committed on
Commit
5ee2221
·
1 Parent(s): eeb429c

Checkpoint at 250 steps

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,133 @@
- ---
- license: apache-2.0
- ---
+ ---
+ license: other
+ base_model: "Qwen/Qwen-Image"
+ tags:
+ - qwen_image
+ - qwen_image-diffusers
+ - text-to-image
+ - image-to-image
+ - diffusers
+ - simpletuner
+ - not-for-all-audiences
+ - lora
+ - template:sd-lora
+ - standard
+ pipeline_tag: text-to-image
+ inference: true
+ widget:
+ - text: 'An domokun in minecraft style.'
+   parameters:
+     negative_prompt: 'ugly, cropped, blurry, low-quality, mediocre average'
+   output:
+     url: ./assets/image_0_0.png
+ ---
+ 
+ # simpletuner-example-qwen_image-peft-lora
+ 
+ This is a PEFT LoRA derived from [Qwen/Qwen-Image](https://huggingface.co/Qwen/Qwen-Image).
+ 
+ The main validation prompt used during training was:
+ ```
+ An domokun in minecraft style.
+ ```
+ 
+ ## Validation settings
+ - CFG: `4.0`
+ - CFG Rescale: `0.0`
+ - Steps: `30`
+ - Sampler: `FlowMatchEulerDiscreteScheduler`
+ - Seed: `42`
+ - Resolution: `1024x1024`
+ 
+ Note: the validation settings are not necessarily the same as the [training settings](#training-settings).
+ 
+ You can find some example images in the following gallery:
+ 
+ <Gallery />
+ 
+ The text encoder **was not** trained; you may reuse the base model's text encoder for inference.
+ 
+ ## Training settings
+ 
+ - Training epochs: 9
+ - Training steps: 250
+ - Learning rate: 0.0001
+ - Learning rate schedule: constant_with_warmup
+ - Warmup steps: 100
+ - Max grad value: 0.01
+ - Effective batch size: 1 (micro-batch size 1 × 1 gradient accumulation step × 1 GPU)
+ - Micro-batch size: 1
+ - Gradient accumulation steps: 1
+ - Number of GPUs: 1
+ - Gradient checkpointing: True
+ - Prediction type: flow_matching
+ - Optimizer: optimi-lion
+ - Trainable parameter precision: Pure BF16
+ - Base model precision: `int8-quanto`
+ - Caption dropout probability: 0.0%
+ 
+ - LoRA Rank: 8
+ - LoRA Alpha: 8.0
+ - LoRA Dropout: 0.1
+ - LoRA initialisation style: default
+ - LoRA mode: Standard
+ 
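+ For reference, here is a minimal sketch of a PEFT adapter configuration matching the settings above (rank 8, alpha 8.0, dropout 0.1, default initialisation). The `target_modules` list is an assumption for illustration only; SimpleTuner chooses the actual target modules internally:
+ 
+ ```python
+ from peft import LoraConfig
+ 
+ # Hypothetical equivalent of the LoRA settings listed above.
+ # target_modules is illustrative -- not taken from this repository.
+ lora_config = LoraConfig(
+     r=8,
+     lora_alpha=8,
+     lora_dropout=0.1,
+     init_lora_weights=True,  # the "default" initialisation style
+     target_modules=["to_q", "to_k", "to_v", "to_out.0"],
+ )
+ ```
+ 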
+ ## Datasets
+ 
+ ### dreambooth-1024
+ - Repeats: 0
+ - Total number of images: 26
+ - Total number of aspect buckets: 1
+ - Resolution: 1.048576 megapixels (1024x1024)
+ - Cropped: True
+ - Crop style: random
+ - Crop aspect: square
+ - Used for regularisation data: No
+ 
+ ## Inference
+ 
+ ```python
+ import torch
+ from diffusers import DiffusionPipeline
+ 
+ model_id = 'Qwen/Qwen-Image'
+ adapter_id = 'simpletuner-example-qwen_image-peft-lora'
+ # Load the base pipeline directly in bf16, then attach the LoRA adapter.
+ pipeline = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.bfloat16)
+ pipeline.load_lora_weights(adapter_id)
+ 
+ prompt = "An domokun in minecraft style."
+ negative_prompt = 'ugly, cropped, blurry, low-quality, mediocre average'
+ 
+ # Optional: quantise the model to save on VRAM.
+ # Note: the base model was quantised to int8 during training, so it is
+ # recommended to do the same at inference time.
+ from optimum.quanto import quantize, freeze, qint8
+ quantize(pipeline.transformer, weights=qint8)
+ freeze(pipeline.transformer)
+ 
+ device = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu'
+ pipeline.to(device)  # the pipeline is already in its target precision level
+ model_output = pipeline(
+     prompt=prompt,
+     negative_prompt=negative_prompt,
+     num_inference_steps=30,
+     generator=torch.Generator(device=device).manual_seed(42),
+     width=1024,
+     height=1024,
+     guidance_scale=4.0,
+ ).images[0]
+ 
+ model_output.save("output.png", format="PNG")
+ ```
+ 
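+ If the quantised transformer still does not fit in memory, one option is diffusers' model CPU offload, which keeps weights in system RAM and streams submodules to the accelerator only while they run (a sketch, assuming a recent diffusers release with accelerate installed; use it in place of the `pipeline.to(device)` call above):
+ 
+ ```python
+ # Trades inference speed for a lower peak VRAM footprint.
+ pipeline.enable_model_cpu_offload()
+ ```
+ 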
assets/image_0_0.png ADDED

Git LFS Details

  • SHA256: 5a59bd90cf08f7bfba3620f4a1a1bb1a29cb2716447fc5e710386ed69f78d836
  • Pointer size: 132 Bytes
  • Size of remote file: 2.38 MB
optimizer.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:795d78330cb4804f1c740618f5d8cb601b17e16cc8884c9d47fb677c50a6d7ff
+ size 47467915
pytorch_lora_weights.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:624788e67f07796cfb5e74e6735dd2150099239b0f8249d3d4b19ebeabae1b15
+ size 23655824
random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c5b31014e0091952ce994169c3eadc8fb6e327066ab3c61b994cd6ada6888c89
+ size 14757
scheduler.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db38142dfbc297aaf07b39e2404980521f44adccd3b871d12fb14d2f143a693e
+ size 1401
simpletuner_config.json ADDED
@@ -0,0 +1,318 @@
+ {
+   "snr_gamma": null,
+   "use_soft_min_snr": false,
+   "soft_min_snr_sigma_data": null,
+   "model_family": "qwen_image",
+   "model_flavour": "v1.0",
+   "model_type": "lora",
+   "loss_type": "l2",
+   "huber_schedule": "snr",
+   "huber_c": 0.1,
+   "hidream_use_load_balancing_loss": false,
+   "hidream_load_balancing_loss_weight": null,
+   "flux_lora_target": "all",
+   "flow_sigmoid_scale": 1.0,
+   "flux_fast_schedule": false,
+   "flow_use_uniform_schedule": false,
+   "flow_use_beta_schedule": false,
+   "flow_beta_schedule_alpha": 2.0,
+   "flow_beta_schedule_beta": 2.0,
+   "flow_schedule_shift": 0.0,
+   "flow_schedule_auto_shift": true,
+   "flux_guidance_mode": "constant",
+   "flux_guidance_value": 1.0,
+   "flux_guidance_min": 0.0,
+   "flux_guidance_max": 4.0,
+   "flux_attention_masked_training": false,
+   "ltx_train_mode": "i2v",
+   "ltx_i2v_prob": 0.1,
+   "ltx_protect_first_frame": false,
+   "ltx_partial_noise_fraction": 0.05,
+   "t5_padding": "unmodified",
+   "sd3_clip_uncond_behaviour": "empty_string",
+   "sd3_t5_uncond_behaviour": null,
+   "lora_type": "standard",
+   "peft_lora_mode": "standard",
+   "singlora_ramp_up_steps": 0,
+   "lora_init_type": "default",
+   "init_lora": null,
+   "lora_rank": 8,
+   "lora_alpha": 8.0,
+   "lora_dropout": 0.1,
+   "lycoris_config": "config/lycoris_config.json",
+   "init_lokr_norm": null,
+   "conditioning_multidataset_sampling": "random",
+   "control": false,
+   "controlnet": false,
+   "controlnet_custom_config": null,
+   "tread_config": null,
+   "controlnet_model_name_or_path": null,
+   "pretrained_model_name_or_path": "Qwen/Qwen-Image",
+   "pretrained_transformer_model_name_or_path": null,
+   "pretrained_transformer_subfolder": "transformer",
+   "pretrained_unet_model_name_or_path": null,
+   "pretrained_unet_subfolder": "unet",
+   "pretrained_vae_model_name_or_path": "Qwen/Qwen-Image",
+   "pretrained_t5_model_name_or_path": null,
+   "prediction_type": "flow_matching",
+   "snr_weight": 1.0,
+   "training_scheduler_timestep_spacing": "trailing",
+   "inference_scheduler_timestep_spacing": "trailing",
+   "refiner_training": false,
+   "refiner_training_invert_schedule": false,
+   "refiner_training_strength": 0.2,
+   "timestep_bias_strategy": "none",
+   "timestep_bias_multiplier": 1.0,
+   "timestep_bias_begin": 0,
+   "timestep_bias_end": 1000,
+   "timestep_bias_portion": 0.25,
+   "disable_segmented_timestep_sampling": false,
+   "rescale_betas_zero_snr": false,
+   "vae_dtype": "bf16",
+   "vae_batch_size": 1,
+   "vae_enable_tiling": false,
+   "vae_enable_slicing": false,
+   "vae_cache_scan_behaviour": "recreate",
+   "vae_cache_ondemand": false,
+   "compress_disk_cache": false,
+   "aspect_bucket_disable_rebuild": false,
+   "keep_vae_loaded": false,
+   "skip_file_discovery": "",
+   "revision": null,
+   "variant": null,
+   "preserve_data_backend_cache": false,
+   "use_dora": false,
+   "override_dataset_config": false,
+   "cache_dir_text": "cache",
+   "cache_dir_vae": "",
+   "data_backend_config": "config/examples/multidatabackend-small-dreambooth-1024px.json",
+   "data_backend_sampling": "auto-weighting",
+   "ignore_missing_files": false,
+   "write_batch_size": 128,
+   "read_batch_size": 25,
+   "image_processing_batch_size": 32,
+   "enable_multiprocessing": false,
+   "max_workers": 32,
+   "aws_max_pool_connections": 128,
+   "torch_num_threads": 8,
+   "dataloader_prefetch": false,
+   "dataloader_prefetch_qlen": 10,
+   "aspect_bucket_worker_count": 12,
+   "cache_dir": "output/examples/qwen_image.peft-lora/cache",
+   "cache_clear_validation_prompts": false,
+   "caption_strategy": "filename",
+   "parquet_caption_column": null,
+   "parquet_filename_column": null,
+   "instance_prompt": null,
+   "output_dir": "output/examples/qwen_image.peft-lora",
+   "seed": 42,
+   "seed_for_each_device": true,
+   "framerate": null,
+   "resolution": 1024.0,
+   "resolution_type": "pixel_area",
+   "aspect_bucket_rounding": null,
+   "aspect_bucket_alignment": 32,
+   "minimum_image_size": 0.0,
+   "maximum_image_size": null,
+   "target_downsample_size": null,
+   "train_text_encoder": false,
+   "tokenizer_max_length": null,
+   "train_batch_size": 1,
+   "num_train_epochs": 77,
+   "max_train_steps": 2000,
+   "ignore_final_epochs": true,
+   "checkpointing_steps": 50,
+   "checkpointing_rolling_steps": 0,
+   "checkpointing_use_tempdir": false,
+   "checkpoints_total_limit": 20,
+   "checkpoints_rolling_total_limit": 1,
+   "resume_from_checkpoint": null,
+   "gradient_accumulation_steps": 1,
+   "gradient_checkpointing": true,
+   "gradient_checkpointing_interval": null,
+   "learning_rate": 0.0001,
+   "text_encoder_lr": null,
+   "lr_scale": false,
+   "lr_scale_sqrt": false,
+   "lr_scheduler": "constant_with_warmup",
+   "lr_warmup_steps": 100,
+   "lr_num_cycles": 1,
+   "lr_power": 0.8,
+   "distillation_method": null,
+   "distillation_config": null,
+   "use_ema": false,
+   "ema_device": "cpu",
+   "ema_validation": "comparison",
+   "ema_cpu_only": false,
+   "ema_foreach_disable": false,
+   "ema_update_interval": null,
+   "ema_decay": 0.995,
+   "non_ema_revision": null,
+   "offload_during_startup": false,
+   "offload_param_path": null,
+   "optimizer": "optimi-lion",
+   "optimizer_config": null,
+   "optimizer_cpu_offload_method": "none",
+   "optimizer_offload_gradients": false,
+   "fuse_optimizer": false,
+   "optimizer_beta1": null,
+   "optimizer_beta2": null,
+   "optimizer_release_gradients": false,
+   "adam_beta1": 0.9,
+   "adam_beta2": 0.999,
+   "adam_weight_decay": 0.01,
+   "adam_epsilon": 1e-08,
+   "prodigy_steps": null,
+   "max_grad_norm": 0.01,
+   "grad_clip_method": "value",
+   "push_to_hub": false,
+   "push_checkpoints_to_hub": false,
+   "hub_model_id": "simpletuner-example-qwen_image-peft-lora",
+   "model_card_note": null,
+   "model_card_safe_for_work": false,
+   "logging_dir": "output/examples/qwen_image.peft-lora/logs",
+   "disable_benchmark": false,
+   "evaluation_type": null,
+   "eval_dataset_pooling": false,
+   "pretrained_evaluation_model_name_or_path": "openai/clip-vit-large-patch14-336",
+   "validation_on_startup": false,
+   "validation_seed_source": "cpu",
+   "validation_lycoris_strength": 1.0,
+   "validation_torch_compile": false,
+   "validation_torch_compile_mode": "max-autotune",
+   "validation_guidance_skip_layers": null,
+   "validation_guidance_skip_layers_start": 0.01,
+   "validation_guidance_skip_layers_stop": 0.2,
+   "validation_guidance_skip_scale": 2.8,
+   "sana_complex_human_instruction": [
+     "Given a user prompt, generate an 'Enhanced prompt' that provides detailed visual descriptions suitable for image generation. Evaluate the level of detail in the user prompt:",
+     "- If the prompt is simple, focus on adding specifics about colors, shapes, sizes, textures, and spatial relationships to create vivid and concrete scenes.",
+     "- Do not add detail that is not in the original prompt! Do not mention colors if the User Prompt does not!",
+     "- If the prompt is already detailed, refine and enhance the existing details slightly without overcomplicating.",
+     "- If the prompt contains explicit material, do not remove this information from the output, which would damage the performance of downstream classifiers.",
+     "Here are examples of how to transform or refine prompts:",
+     "- User Prompt: A cat sleeping -> Enhanced: A cat sleeping peacefully, showcasing the joy of pet ownership. Cute floof kitty cat gatto.",
+     "- User Prompt: A busy city street -> Enhanced: A bustling city street scene featuring a crowd of people.",
+     "Please generate only the enhanced description for the prompt below and avoid including any additional commentary or evaluations:",
+     "User Prompt: "
+   ],
+   "disable_tf32": false,
+   "validation_using_datasets": null,
+   "webhook_config": null,
+   "webhook_reporting_interval": null,
+   "report_to": "none",
+   "tracker_run_name": "example-training-run",
+   "tracker_project_name": "lora-training",
+   "tracker_image_layout": "gallery",
+   "validation_prompt": "An domokun in minecraft style.",
+   "validation_prompt_library": false,
+   "user_prompt_library": null,
+   "validation_negative_prompt": "ugly, cropped, blurry, low-quality, mediocre average",
+   "num_validation_images": 1,
+   "validation_disable": false,
+   "validation_steps": 50,
+   "validation_stitch_input_location": "left",
+   "eval_steps_interval": null,
+   "eval_timesteps": 28,
+   "num_eval_images": 25,
+   "eval_dataset_id": null,
+   "validation_num_inference_steps": 30,
+   "validation_num_video_frames": null,
+   "validation_resolution": "1024x1024",
+   "validation_noise_scheduler": null,
+   "validation_disable_unconditional": true,
+   "enable_watermark": false,
+   "mixed_precision": "bf16",
+   "gradient_precision": null,
+   "quantize_via": "cpu",
+   "base_model_precision": "int8-quanto",
+   "quantize_activations": false,
+   "base_model_default_dtype": "bf16",
+   "text_encoder_1_precision": "no_change",
+   "text_encoder_2_precision": "no_change",
+   "text_encoder_3_precision": "no_change",
+   "text_encoder_4_precision": "no_change",
+   "local_rank": -1,
+   "fuse_qkv_projections": false,
+   "attention_mechanism": "diffusers",
+   "sageattention_usage": "inference",
+   "set_grads_to_none": false,
+   "noise_offset": 0.1,
+   "noise_offset_probability": 0.25,
+   "masked_loss_probability": 1.0,
+   "validation_guidance": 4.0,
+   "validation_guidance_real": 1.0,
+   "validation_no_cfg_until_timestep": 2,
+   "validation_guidance_rescale": 0.0,
+   "validation_randomize": false,
+   "validation_seed": 42,
+   "fully_unload_text_encoder": false,
+   "freeze_encoder_before": 12,
+   "freeze_encoder_after": 17,
+   "freeze_encoder_strategy": "after",
+   "layer_freeze_strategy": "none",
+   "unet_attention_slice": false,
+   "print_filenames": false,
+   "print_sampler_statistics": false,
+   "metadata_update_interval": 3600,
+   "debug_aspect_buckets": false,
+   "debug_dataset_loader": false,
+   "freeze_encoder": true,
+   "save_text_encoder": false,
+   "text_encoder_limit": 25,
+   "prepend_instance_prompt": false,
+   "only_instance_prompt": false,
+   "data_aesthetic_score": 7.0,
+   "sdxl_refiner_uses_full_range": false,
+   "caption_dropout_probability": 0.0,
+   "delete_unwanted_images": false,
+   "delete_problematic_images": false,
+   "disable_bucket_pruning": true,
+   "offset_noise": false,
+   "input_perturbation": 0.0,
+   "input_perturbation_steps": 0,
+   "lr_end": "4e-7",
+   "i_know_what_i_am_doing": false,
+   "accelerator_cache_clear_interval": null,
+   "vae_path": "Qwen/Qwen-Image",
+   "accelerator_project_config": {
+     "project_dir": "output/examples/qwen_image.peft-lora",
+     "logging_dir": "output/examples/qwen_image.peft-lora/logs",
+     "automatic_checkpoint_naming": false,
+     "total_limit": null,
+     "iteration": 5,
+     "save_on_each_node": false
+   },
+   "process_group_kwargs": {
+     "backend": "nccl",
+     "init_method": null,
+     "timeout": "1:30:00"
+   },
+   "is_quantized": true,
+   "weight_dtype": "torch.bfloat16",
+   "disable_accelerator": false,
+   "lora_initialisation_style": true,
+   "model_type_label": "Qwen-Image",
+   "use_deepspeed_optimizer": false,
+   "use_deepspeed_scheduler": false,
+   "base_weight_dtype": "torch.bfloat16",
+   "is_quanto": true,
+   "is_torchao": false,
+   "is_bnb": false,
+   "flow_matching": true,
+   "vae_kwargs": {
+     "pretrained_model_name_or_path": "Qwen/Qwen-Image",
+     "subfolder": "vae",
+     "revision": null,
+     "force_upcast": false,
+     "variant": null
+   },
+   "enable_adamw_bf16": true,
+   "overrode_max_train_steps": false,
+   "total_num_batches": 26,
+   "num_update_steps_per_epoch": 26,
+   "total_batch_size": 1,
+   "is_schedulefree": false,
+   "is_lr_scheduler_disabled": false,
+   "total_steps_remaining_at_start": 2000
+ }
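
The validation renders in this checkpoint can be reproduced from the fields in this config. A minimal sketch of pulling out the relevant values (the local filename is an assumption based on this repository's layout):

```python
import json

# Assumes the repository has been cloned locally.
with open("simpletuner_config.json") as f:
    cfg = json.load(f)

width, height = map(int, cfg["validation_resolution"].split("x"))
print(cfg["validation_prompt"])               # "An domokun in minecraft style."
print(cfg["validation_seed"],                 # 42
      cfg["validation_num_inference_steps"],  # 30
      cfg["validation_guidance"],             # 4.0
      width, height)                          # 1024 1024
```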
training_state-dreambooth-1024.json ADDED
@@ -0,0 +1 @@
+ {"aspect_ratio_bucket_indices": {"1.0": ["0.jpg", "1.jpg", "2.jpg", "3.jpg", "4.jpg", "5.jpg", "6.jpg", "7.jpg", "8.jpg", "9.jpg", "10.jpg", "11.jpg", "12.jpg", "13.jpg", "14.jpg", "15.jpg", "16.jpg", "17.jpg", "18.jpg", "19.jpg", "20.jpg", "21.jpg", "22.jpg", "23.jpg", "24.jpg", "25.jpg"]}, "buckets": ["1.0"], "exhausted_buckets": [], "batch_size": 1, "current_bucket": 0, "seen_images": {"4.jpg": true, "14.jpg": true, "6.jpg": true, "10.jpg": true, "18.jpg": true, "9.jpg": true, "2.jpg": true, "21.jpg": true, "25.jpg": true, "5.jpg": true, "1.jpg": true, "19.jpg": true, "16.jpg": true, "0.jpg": true, "7.jpg": true, "12.jpg": true}, "current_epoch": 10}
training_state.json ADDED
@@ -0,0 +1 @@
+ {"global_step": 250, "epoch_step": 259, "epoch": 10, "exhausted_backends": [], "repeats": {"dreambooth-1024": 0}}