Revlon Carter
committed on
Update README.md
Browse files
README.md
CHANGED
|
@@ -171,35 +171,36 @@ For any licensing questions, contact us at ```lambda.go.company@gmail.com```
|
|
| 171 |
**Prerequisites:**
|
| 172 |
- **Install necessary libraries:**
|
| 173 |
```
|
| 174 |
-
pip install transformers diffusers torch Pillow huggingface_hub
|
| 175 |
```
|
| 176 |
- **Code to Use the Model:**
|
| 177 |
|
| 178 |
```
|
| 179 |
-
from transformers import AutoTokenizer
|
| 180 |
-
from diffusers import DiffusionPipeline
|
| 181 |
import torch
|
| 182 |
from PIL import Image
|
| 183 |
import requests
|
| 184 |
from io import BytesIO
|
| 185 |
|
| 186 |
# Your Hugging Face API token
|
| 187 |
-
API_TOKEN = "
|
| 188 |
|
| 189 |
# Load the model and tokenizer from Hugging Face
|
| 190 |
model_name = "future-technologies/Floral-High-Dynamic-Range"
|
|
|
|
| 191 |
|
| 192 |
# Error handling for model loading
|
| 193 |
try:
|
| 194 |
-
model = AutoModelForImageGeneration.from_pretrained(model_name, use_auth_token=API_TOKEN)
|
| 195 |
-
tokenizer = AutoTokenizer.from_pretrained(model_name,
|
| 196 |
except Exception as e:
|
| 197 |
print(f"Error loading model: {e}")
|
| 198 |
exit()
|
| 199 |
|
| 200 |
# Initialize the diffusion pipeline
|
| 201 |
try:
|
| 202 |
-
pipe = FluxPipeline.from_pretrained(model_name
|
| 203 |
pipe.to("cuda" if torch.cuda.is_available() else "cpu")
|
| 204 |
except Exception as e:
|
| 205 |
print(f"Error initializing pipeline: {e}")
|
|
|
|
| 171 |
**Prerequisites:**
|
| 172 |
- **Install necessary libraries:**
|
| 173 |
```
|
| 174 |
+
pip install transformers diffusers torch Pillow huggingface_hub requests
|
| 175 |
```
|
| 176 |
- **Code to Use the Model:**
|
| 177 |
|
| 178 |
```
|
| 179 |
+
from transformers import AutoTokenizer #, AutoModelForImageGeneration
|
| 180 |
+
from diffusers import DiffusionPipeline, FluxPipeline
|
| 181 |
import torch
|
| 182 |
from PIL import Image
|
| 183 |
import requests
|
| 184 |
from io import BytesIO
|
| 185 |
|
| 186 |
# Your Hugging Face API token
|
| 187 |
+
API_TOKEN = "<redacted>"
|
| 188 |
|
| 189 |
# Load the model and tokenizer from Hugging Face
|
| 190 |
model_name = "future-technologies/Floral-High-Dynamic-Range"
|
| 191 |
+
#model_name = "black-forest-labs/FLUX.1-dev"
|
| 192 |
|
| 193 |
# Error handling for model loading
|
| 194 |
try:
|
| 195 |
+
#model = AutoModelForImageGeneration.from_pretrained(model_name, use_auth_token=API_TOKEN)
|
| 196 |
+
tokenizer = AutoTokenizer.from_pretrained(model_name, token=API_TOKEN)
|
| 197 |
except Exception as e:
|
| 198 |
print(f"Error loading model: {e}")
|
| 199 |
exit()
|
| 200 |
|
| 201 |
# Initialize the diffusion pipeline
|
| 202 |
try:
|
| 203 |
+
pipe = FluxPipeline.from_pretrained(model_name)
|
| 204 |
pipe.to("cuda" if torch.cuda.is_available() else "cpu")
|
| 205 |
except Exception as e:
|
| 206 |
print(f"Error initializing pipeline: {e}")
|