Instructions to use ayushtues/blipdiffusion with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
  - Diffusers
How to use ayushtues/blipdiffusion with Diffusers:
```
pip install -U diffusers transformers accelerate
```

```python
import torch
from diffusers import DiffusionPipeline

# Switch device_map to "mps" for Apple devices.
pipe = DiffusionPipeline.from_pretrained(
    "ayushtues/blipdiffusion", dtype=torch.bfloat16, device_map="cuda"
)

prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
image = pipe(prompt).images[0]
```

Note that BLIP-Diffusion is a subject-driven generation model, so this generic text-to-image call may not match the pipeline's actual signature; see the sketch after the notebook links below.

- Notebooks
  - Google Colab
  - Kaggle
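Because BLIP-Diffusion generates images conditioned on a reference subject, the dedicated `BlipDiffusionPipeline` in diffusers takes a reference image plus source and target subject categories in addition to the text prompt. Below is a minimal sketch patterned on the diffusers documentation; the reference-image URL, subject names, and prompt are illustrative placeholders, not part of this model card:

```python
import torch
from diffusers.pipelines import BlipDiffusionPipeline
from diffusers.utils import load_image

pipe = BlipDiffusionPipeline.from_pretrained(
    "ayushtues/blipdiffusion", torch_dtype=torch.float16
).to("cuda")

# Reference image showing the subject to preserve (placeholder URL
# taken from the diffusers docs example).
cond_image = load_image(
    "https://huggingface.co/datasets/ayushtues/blipdiffusion_images/resolve/main/dog.jpg"
)

output = pipe(
    "swimming underwater",  # text prompt for the target scene
    cond_image,             # reference image of the subject
    "dog",                  # source subject category
    "dog",                  # target subject category
    guidance_scale=7.5,
    num_inference_steps=25,
    neg_prompt="lowres, cropped, worst quality, low quality",
    height=512,
    width=512,
).images
output[0].save("blipdiffusion_out.png")
```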
Upload config.json

text_encoder/config.json (+3 -3)

```diff
@@ -1,7 +1,7 @@
 {
-  "_name_or_path": "
+  "_name_or_path": "cache/models--runwayml--stable-diffusion-v1-5",
   "architectures": [
-    "
+    "ContextCLIPTextModel"
   ],
   "attention_dropout": 0.0,
   "bos_token_id": 0,
@@ -20,6 +20,6 @@
   "pad_token_id": 1,
   "projection_dim": 768,
   "torch_dtype": "float32",
-  "transformers_version": "4.
+  "transformers_version": "4.32.1",
   "vocab_size": 49408
 }
```
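To confirm the fields this commit changed, the text encoder's config can be loaded on its own. A minimal sketch, assuming the subfolder layout shown in the diff (`text_encoder/config.json`):

```python
from transformers import PretrainedConfig

# Load only text_encoder/config.json from the model repo.
config = PretrainedConfig.from_pretrained(
    "ayushtues/blipdiffusion", subfolder="text_encoder"
)

print(config.architectures)         # ["ContextCLIPTextModel"] after this commit
print(config.transformers_version)  # "4.32.1" after this commit
print(config.vocab_size)            # 49408 (unchanged)
```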