Xsong123 committed on
Commit
2b27df8
·
verified ·
1 Parent(s): 6f6b310

Upload README.md with huggingface_hub

Browse files
Files changed (1) hide show
  1. README.md +25 -20
README.md CHANGED
@@ -30,40 +30,45 @@ Here are some examples of images generated using this style LoRA:
30
  ![Clay Toy Style Example](./example-4.png)
31
  ![Clay Toy Style Example](./example-5.png)
32
  ![Clay Toy Style Example](./example-6.png)
33
- ![Clay Toy Style Example](./example-7.png)
34
 
35
  ## Inference Example
36
  ```python
37
- from huggingface_hub import hf_hub_download
38
  from diffusers import FluxKontextPipeline
39
  from diffusers.utils import load_image
40
  import torch
41
 
42
- # Define the style and model details
43
- STYLE_NAME = "Clay_Toy"
44
- LORA_FILENAME = "Clay_Toy_lora_weights.safetensors"
45
- REPO_ID = "Kontext-Style/Clay_Toy_lora"
 
46
 
47
- # Download the LoRA weights
48
- # Make sure you have created a folder named 'LoRAs' in your current directory
49
- hf_hub_download(repo_id=REPO_ID, filename=LORA_FILENAME, local_dir="./LoRAs")
50
 
51
- # Load an image
52
  image = load_image("https://huggingface.co/datasets/black-forest-labs/kontext-bench/resolve/main/test/images/0003.jpg").resize((1024, 1024))
53
 
54
- # Load the pipeline
55
- pipeline = FluxKontextPipeline.from_pretrained("black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16).to('cuda')
56
-
57
- # Load and set the LoRA adapter
58
- pipeline.load_lora_weights(f"./LoRAs/{LORA_FILENAME}", adapter_name="lora")
59
- pipeline.set_adapters(["lora"], adapter_weights=[1])
60
 
61
  # Run inference
62
- prompt = f"Turn this image into the {STYLE_NAME.replace('_', ' ')} style."
63
- result_image = pipeline(image=image, prompt=prompt, height=1024, width=1024, num_inference_steps=24).images[0]
64
- result_image.save(f"{STYLE_NAME}.png")
 
 
 
 
 
 
 
 
65
 
66
- print(f"Image saved as {STYLE_NAME}.png")
67
  ```
68
 
69
  Feel free to open an issue or contact us for feedback or collaboration!
 
30
  ![Clay Toy Style Example](./example-4.png)
31
  ![Clay Toy Style Example](./example-5.png)
32
  ![Clay Toy Style Example](./example-6.png)
 
33
 
34
  ## Inference Example
35
  ```python
 
36
  from diffusers import FluxKontextPipeline
37
  from diffusers.utils import load_image
38
  import torch
39
 
40
+ # Load the base pipeline
41
+ pipeline = FluxKontextPipeline.from_pretrained(
42
+ "black-forest-labs/FLUX.1-Kontext-dev",
43
+ torch_dtype=torch.bfloat16
44
+ ).to('cuda')
45
 
46
+ # Load the LoRA adapter for the Clay Toy style directly from the Hub
47
+ pipeline.load_lora_weights("Kontext-Style/Clay_Toy_lora", weight_name="Clay_Toy_lora_weights.safetensors", adapter_name="lora")
48
+ pipeline.set_adapters(["lora"], adapter_weights=[1])
49
 
50
+ # Load a source image (you can use any image)
51
  image = load_image("https://huggingface.co/datasets/black-forest-labs/kontext-bench/resolve/main/test/images/0003.jpg").resize((1024, 1024))
52
 
53
+ # Prepare the prompt
54
+ # The style_name is used in the prompt and for the output filename.
55
+ style_name = "Clay Toy"
56
+ prompt = f"Turn this image into the {style_name} style."
 
 
57
 
58
  # Run inference
59
+ result_image = pipeline(
60
+ image=image,
61
+ prompt=prompt,
62
+ height=1024,
63
+ width=1024,
64
+ num_inference_steps=24
65
+ ).images[0]
66
+
67
+ # Save the result
68
+ output_filename = f"{style_name.replace(' ', '_')}.png"
69
+ result_image.save(output_filename)
70
 
71
+ print(f"Image saved as {output_filename}")
72
  ```
73
 
74
  Feel free to open an issue or contact us for feedback or collaboration!