Update README.md
Browse files
README.md
CHANGED
|
@@ -52,53 +52,52 @@ While the current version (v1) produces stylistic, slightly "painterly" or "pixe
|
|
| 52 |
## 🛠️ How to use
|
| 53 |
```python
|
| 54 |
import torch
|
| 55 |
-
from transformers import AutoTokenizer, CLIPTextModel, AutoConfig
|
| 56 |
-
from huggingface_hub import hf_hub_download
|
| 57 |
-
from safetensors.torch import load_file
|
| 58 |
import matplotlib.pyplot as plt
|
| 59 |
import numpy as np
|
| 60 |
-
|
| 61 |
-
|
|
|
|
| 62 |
|
| 63 |
def generate_fixed_from_hub(prompt, model_id="TopAI-1/Pixel-1"):
|
| 64 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 65 |
print(f"🚀 Working on {device}...")
|
| 66 |
|
| 67 |
-
# 1.
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
|
|
|
| 71 |
|
| 72 |
-
# 2.
|
| 73 |
tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-large-patch14")
|
| 74 |
text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14").to(device)
|
| 75 |
|
| 76 |
-
# 3.
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
|
|
|
|
|
|
|
|
|
| 81 |
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
# Loading
|
| 85 |
-
model.load_state_dict(clean_sd, strict=False)
|
| 86 |
-
model.to(device).eval()
|
| 87 |
-
print("✅ Weights loaded perfectly!")
|
| 88 |
|
| 89 |
-
# 4.
|
|
|
|
| 90 |
inputs = tokenizer(prompt, padding="max_length", max_length=77, truncation=True, return_tensors="pt").to(device)
|
|
|
|
| 91 |
with torch.no_grad():
|
| 92 |
emb = text_encoder(inputs.input_ids).pooler_output
|
| 93 |
out = model(emb)
|
| 94 |
|
| 95 |
-
# 5.
|
| 96 |
img = (out.squeeze(0).cpu().permute(1, 2, 0).numpy() + 1.0) / 2.0
|
| 97 |
plt.figure(figsize=(8, 8))
|
| 98 |
plt.imshow(np.clip(img, 0, 1))
|
| 99 |
plt.axis('off')
|
| 100 |
-
plt.title(prompt)
|
| 101 |
plt.show()
|
| 102 |
|
| 103 |
-
#
|
| 104 |
generate_fixed_from_hub("Window with metal bars and fence shadow")
|
|
|
|
| 52 |
## 🛠️ How to use
|
| 53 |
```python
|
| 54 |
import torch
|
|
|
|
|
|
|
|
|
|
| 55 |
import matplotlib.pyplot as plt
|
| 56 |
import numpy as np
|
| 57 |
+
import os
|
| 58 |
+
import shutil
|
| 59 |
+
from transformers import AutoTokenizer, CLIPTextModel, AutoModel, AutoConfig
|
| 60 |
|
| 61 |
def generate_fixed_from_hub(prompt, model_id="TopAI-1/Pixel-1"):
|
| 62 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 63 |
print(f"🚀 Working on {device}...")
|
| 64 |
|
| 65 |
+
# 1. Clear the cache so you load the latest files from the Hub instead of stale ones
|
| 66 |
+
cache_path = os.path.expanduser(f"~/.cache/huggingface/hub/models--{model_id.replace('/', '--')}")
|
| 67 |
+
if os.path.exists(cache_path):
|
| 68 |
+
print("🧹 Clearing old cache to fetch your latest fixes...")
|
| 69 |
+
shutil.rmtree(cache_path)
|
| 70 |
|
| 71 |
+
# 2. Load CLIP
|
| 72 |
tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-large-patch14")
|
| 73 |
text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14").to(device)
|
| 74 |
|
| 75 |
+
# 3. Load the model and custom pipeline code from the Hub
|
| 76 |
+
# Thanks to the auto_map in config.json, transformers will locate the class on its own
|
| 77 |
+
print("📥 Downloading architecture and weights directly from Hub...")
|
| 78 |
+
model = AutoModel.from_pretrained(
|
| 79 |
+
model_id,
|
| 80 |
+
trust_remote_code=True,
|
| 81 |
+
force_download=True
|
| 82 |
+
).to(device)
|
| 83 |
|
| 84 |
+
model.eval()
|
| 85 |
+
print("✅ Model loaded successfully!")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 86 |
|
| 87 |
+
# 4. Generation
|
| 88 |
+
print(f"🎨 Generating: {prompt}")
|
| 89 |
inputs = tokenizer(prompt, padding="max_length", max_length=77, truncation=True, return_tensors="pt").to(device)
|
| 90 |
+
|
| 91 |
with torch.no_grad():
|
| 92 |
emb = text_encoder(inputs.input_ids).pooler_output
|
| 93 |
out = model(emb)
|
| 94 |
|
| 95 |
+
# 5. Display
|
| 96 |
img = (out.squeeze(0).cpu().permute(1, 2, 0).numpy() + 1.0) / 2.0
|
| 97 |
plt.figure(figsize=(8, 8))
|
| 98 |
plt.imshow(np.clip(img, 0, 1))
|
| 99 |
plt.axis('off')
|
|
|
|
| 100 |
plt.show()
|
| 101 |
|
| 102 |
+
# Run
|
| 103 |
generate_fixed_from_hub("Window with metal bars and fence shadow")
|