peteromallet committed
Commit 8c929b0 · verified · 1 Parent(s): 9798e4a

Upload strip_lora_extras.py with huggingface_hub
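As the commit message notes, the file was pushed with the huggingface_hub library. For context, a minimal sketch of how such an upload is typically done with the real `HfApi.upload_file` API (the repo id below is a placeholder, not the actual repository):

    from huggingface_hub import HfApi

    api = HfApi()
    # Upload the local script to the repo root under the same name.
    api.upload_file(
        path_or_fileobj="strip_lora_extras.py",
        path_in_repo="strip_lora_extras.py",
        repo_id="peteromallet/some-repo",  # hypothetical repo id
        commit_message="Upload strip_lora_extras.py with huggingface_hub",
    )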

Files changed (1)
  1. strip_lora_extras.py +62 -0
strip_lora_extras.py ADDED
@@ -0,0 +1,62 @@
+ import argparse
+ import torch
+ from safetensors.torch import load_file, save_file
+ from collections import OrderedDict  # Use OrderedDict to maintain key order if desired
+
+ def strip_lora_extra_keys(lora_path, output_path):
+     """
+     Loads a LoRA file and saves a new version containing only
+     '.lora_down.weight' and '.lora_up.weight' keys,
+     preserving their original module paths and the 'diffusion_model.' prefix.
+     All tensors will be saved in float16.
+     """
+     try:
+         source_state_dict = load_file(lora_path)
+         print(f"Successfully loaded LoRA from: {lora_path} ({len(source_state_dict)} original keys)")
+     except Exception as e:
+         print(f"Error loading LoRA file '{lora_path}': {e}")
+         return
+
+     stripped_state_dict = OrderedDict()
+     kept_keys_count = 0
+     discarded_keys_count = 0
+
+     for key, tensor in source_state_dict.items():
+         # We expect keys to already be in the 'diffusion_model.<...>.lora_down.weight' format
+         if key.endswith(".lora_down.weight") or key.endswith(".lora_up.weight"):
+             if tensor.is_floating_point():
+                 stripped_state_dict[key] = tensor.to(torch.float16)
+             else:  # Should not happen for these weights
+                 stripped_state_dict[key] = tensor
+                 print(f"Warning: Tensor {key} was not floating point, dtype not changed.")
+             kept_keys_count += 1
+         else:
+             discarded_keys_count += 1
+             # print(f"Discarded key: {key}")  # Uncomment for verbose output
+
+     print("\nStripping complete.")
+     print(f"Kept {kept_keys_count} keys (lora_down.weight / lora_up.weight).")
+     print(f"Discarded {discarded_keys_count} other keys (e.g., .diff_b, .diff, etc.).")
+
+     if stripped_state_dict:
+         print(f"Output dictionary has {len(stripped_state_dict)} keys.")
+         print(f"Now attempting to save the stripped LoRA to: {output_path}...")
+         try:
+             save_file(stripped_state_dict, output_path)
+             print(f"\nSuccessfully saved stripped LoRA to: {output_path}")
+         except Exception as e:
+             print(f"Error saving stripped LoRA file '{output_path}': {e}")
+     else:
+         print("\nNo '.lora_down.weight' or '.lora_up.weight' keys were found. Output file not saved.")
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser(
+         description="Strips a LoRA file to only keep .lora_down.weight and .lora_up.weight keys, converting to float16.",
+         formatter_class=argparse.RawTextHelpFormatter
+     )
+     parser.add_argument("lora_path", type=str, help="Path to the input LoRA (.safetensors) file to strip.")
+     parser.add_argument("output_path", type=str, help="Path to save the stripped LoRA (.safetensors) file.")
+     args = parser.parse_args()
+
+     strip_lora_extra_keys(args.lora_path, args.output_path)