Update README.md
Browse files
README.md
CHANGED
|
@@ -18,6 +18,60 @@ You can use the original Qwen-Image parameters as is, though I recommend at least
|
|
| 18 |
|
| 19 |
This model is available for inference at [JustLab.ai](https://justlab.ai)
|
| 20 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 21 |
The original Qwen-Image attributions are included verbatim below.
|
| 22 |
|
| 23 |
|
|
|
|
| 18 |
|
| 19 |
This model is available for inference at [JustLab.ai](https://justlab.ai)
|
| 20 |
|
| 21 |
+
```python
|
| 22 |
+
from diffusers import DiffusionPipeline
|
| 23 |
+
import torch
|
| 24 |
+
|
| 25 |
+
model_name = "ovedrive/qwen-image-4bit"
|
| 26 |
+
|
| 27 |
+
# Load the pipeline
|
| 28 |
+
if torch.cuda.is_available():
|
| 29 |
+
torch_dtype = torch.bfloat16
|
| 30 |
+
device = "cuda"
|
| 31 |
+
else:
|
| 32 |
+
torch_dtype = torch.float32
|
| 33 |
+
device = "cpu"
|
| 34 |
+
|
| 35 |
+
pipe = DiffusionPipeline.from_pretrained(model_name, torch_dtype=torch_dtype)
|
| 36 |
+
pipe = pipe.to(device)
|
| 37 |
+
|
| 38 |
+
positive_magic = {
|
| 39 |
+
"en": "Ultra HD, 4K, cinematic composition." # for english prompt,
|
| 40 |
+
"zh": "超清,4K,电影级构图" # for chinese prompt,
|
| 41 |
+
}
|
| 42 |
+
|
| 43 |
+
# Generate image
|
| 44 |
+
prompt = '''A coffee shop entrance features a chalkboard sign reading "Qwen Coffee 😊 $2 per cup," with a neon light beside it displaying "通义千问". Next to it hangs a poster showing a beautiful Chinese woman, and beneath the poster is written "π≈3.1415926-53589793-23846264-33832795-02384197". Ultra HD, 4K, cinematic composition'''
|
| 45 |
+
|
| 46 |
+
negative_prompt = " " # using an empty string if you do not have specific concept to remove
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
# Generate with different aspect ratios
|
| 50 |
+
aspect_ratios = {
|
| 51 |
+
"1:1": (1328, 1328),
|
| 52 |
+
"16:9": (1664, 928),
|
| 53 |
+
"9:16": (928, 1664),
|
| 54 |
+
"4:3": (1472, 1140),
|
| 55 |
+
"3:4": (1140, 1472),
|
| 56 |
+
"3:2": (1584, 1056),
|
| 57 |
+
"2:3": (1056, 1584),
|
| 58 |
+
}
|
| 59 |
+
|
| 60 |
+
width, height = aspect_ratios["16:9"]
|
| 61 |
+
|
| 62 |
+
image = pipe(
|
| 63 |
+
prompt=prompt + positive_magic["en"],
|
| 64 |
+
negative_prompt=negative_prompt,
|
| 65 |
+
width=width,
|
| 66 |
+
height=height,
|
| 67 |
+
num_inference_steps=20,
|
| 68 |
+
true_cfg_scale=4.0,
|
| 69 |
+
generator=torch.Generator(device="cuda").manual_seed(42)
|
| 70 |
+
).images[0]
|
| 71 |
+
|
| 72 |
+
image.save("example.png")
|
| 73 |
+
```
|
| 74 |
+
|
| 75 |
The original Qwen-Image attributions are included verbatim below.
|
| 76 |
|
| 77 |
|