chestnutlzj commited on
Commit
5195675
·
verified ·
1 Parent(s): 849a228

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +20 -29
README.md CHANGED
@@ -2,7 +2,6 @@
2
  license: apache-2.0
3
  language:
4
  - en
5
- - zh
6
  library_name: diffusers
7
  pipeline_tag: image-to-image
8
  ---
@@ -16,39 +15,31 @@ pipeline_tag: image-to-image
16
  # Usage
17
 
18
  ```python
19
- import os
20
  import torch
21
- from PIL import Image
22
- from diffusers import QwenImageEditPlusPipeline
23
 
24
- pipeline = QwenImageEditPlusPipeline.from_pretrained("Qwen/Qwen-Image-Edit-2509", torch_dtype=torch.bfloat16)
25
- print("pipeline loaded")
26
 
27
- pipeline.load_lora_weights(
28
  "chestnutlzj/Edit-R1-Qwen-Image-Edit-2509",
29
  adapter_name="lora",
30
  )
31
- pipeline.set_adapters(["lora"], adapter_weights=[1])
32
-
33
- pipeline.to('cuda')
34
- pipeline.set_progress_bar_config(disable=None)
35
- image1 = Image.open("input1.png")
36
- image2 = Image.open("input2.png")
37
- prompt = "The magician bear is on the left, the alchemist bear is on the right, facing each other in the central park square."
38
- inputs = {
39
- "image": [image1, image2],
40
- "prompt": prompt,
41
- "generator": torch.manual_seed(0),
42
- "true_cfg_scale": 4.0,
43
- "negative_prompt": " ",
44
- "num_inference_steps": 40,
45
- "guidance_scale": 1.0,
46
- "num_images_per_prompt": 1,
47
- }
48
- with torch.inference_mode():
49
- output = pipeline(**inputs)
50
- output_image = output.images[0]
51
- output_image.save("output_image_edit_plus.png")
52
- print("image saved at", os.path.abspath("output_image_edit_plus.png"))
53
 
54
  ```
 
 
 
 
 
2
  license: apache-2.0
3
  language:
4
  - en
 
5
  library_name: diffusers
6
  pipeline_tag: image-to-image
7
  ---
 
15
  # Usage
16
 
17
  ```python
 
18
  import torch
19
+ from diffusers import FluxKontextPipeline
20
+ from diffusers.utils import load_image
21
 
22
+ pipe = FluxKontextPipeline.from_pretrained("black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16)
 
23
 
24
+ pipe.load_lora_weights(
25
  "chestnutlzj/Edit-R1-Qwen-Image-Edit-2509",
26
  adapter_name="lora",
27
  )
28
+ pipe.set_adapters(["lora"], adapter_weights=[1])
29
+
30
+ pipe.to("cuda")
31
+
32
+
33
+ input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
34
+
35
+ image = pipe(
36
+ image=input_image,
37
+ prompt="Add a hat to the cat",
38
+ guidance_scale=2.5
39
+ ).images[0]
 
 
 
 
 
 
 
 
 
 
40
 
41
  ```
42
+
43
+ # License
44
+
45
+ FLUX.1-Kontext-dev falls under the [FLUX.1 [dev] Non-Commercial License](https://github.com/black-forest-labs/flux/blob/main/model_licenses/LICENSE-FLUX1-dev).