Spaces:
Running
Running
Fixed app.py because it was using an old model
Browse files
Today Pollinations removed the model that was used previously, turbo. Since Pollinations no longer offers turbo, image generation did not work. Image generation now uses klein, with fallbacks.
app.py
CHANGED
|
@@ -124,7 +124,6 @@ IMAGE_STYLE_PRESETS: Dict[str, str] = {
|
|
| 124 |
|
| 125 |
def _normalize_genre_for_role(*, genre: str, role_id: str) -> tuple[str, str]:
|
| 126 |
"""Return (genre, role_display) after validating role_id belongs to genre.
|
| 127 |
-
|
| 128 |
If role_id maps to a different genre, snap genre to that genre.
|
| 129 |
"""
|
| 130 |
genre = (genre or "").strip()
|
|
@@ -330,7 +329,6 @@ def _get_latest_interrupt_configs(thread_id: str) -> list[Any]:
|
|
| 330 |
|
| 331 |
def _clear_pending_writes_for_cfg(cfg: dict) -> None:
|
| 332 |
"""Clear any pending writes for the checkpoint referenced by cfg.
|
| 333 |
-
|
| 334 |
When rewinding to an older checkpoint_id, LangGraph's in-memory checkpointer can
|
| 335 |
still hold pending writes for that checkpoint (including prior interrupt/resume
|
| 336 |
payloads). Clearing them ensures the next resume consumes the *new* user input.
|
|
@@ -807,7 +805,6 @@ def get_image(state: Story):
|
|
| 807 |
base = "https://gen.pollinations.ai/image/"
|
| 808 |
encoded_prompt = urllib.parse.quote((prompt_text or "").strip(), safe="")
|
| 809 |
params: dict[str, str] = {
|
| 810 |
-
"model": "turbo",
|
| 811 |
"width": str(int(os.environ.get("POLLINATIONS_WIDTH") or "768")),
|
| 812 |
"height": str(int(os.environ.get("POLLINATIONS_HEIGHT") or "768")),
|
| 813 |
# Keep defaults conservative; can be tuned later.
|
|
@@ -822,19 +819,61 @@ def get_image(state: Story):
|
|
| 822 |
elif api_key.startswith("pk_"):
|
| 823 |
params["key"] = api_key
|
| 824 |
|
| 825 |
-
|
| 826 |
-
|
| 827 |
-
|
| 828 |
-
|
| 829 |
-
|
| 830 |
-
|
| 831 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 832 |
try:
|
| 833 |
-
|
| 834 |
-
|
| 835 |
-
|
| 836 |
-
|
| 837 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 838 |
|
| 839 |
image_bytes: bytes | None = None
|
| 840 |
hf_error: Exception | None = None
|
|
@@ -1479,4 +1518,4 @@ if __name__ == "__main__":
|
|
| 1479 |
head=HEAD,
|
| 1480 |
allowed_paths=[os.path.abspath("frontend")]
|
| 1481 |
)
|
| 1482 |
-
|
|
|
|
| 124 |
|
| 125 |
def _normalize_genre_for_role(*, genre: str, role_id: str) -> tuple[str, str]:
|
| 126 |
"""Return (genre, role_display) after validating role_id belongs to genre.
|
|
|
|
| 127 |
If role_id maps to a different genre, snap genre to that genre.
|
| 128 |
"""
|
| 129 |
genre = (genre or "").strip()
|
|
|
|
| 329 |
|
| 330 |
def _clear_pending_writes_for_cfg(cfg: dict) -> None:
|
| 331 |
"""Clear any pending writes for the checkpoint referenced by cfg.
|
|
|
|
| 332 |
When rewinding to an older checkpoint_id, LangGraph's in-memory checkpointer can
|
| 333 |
still hold pending writes for that checkpoint (including prior interrupt/resume
|
| 334 |
payloads). Clearing them ensures the next resume consumes the *new* user input.
|
|
|
|
| 805 |
base = "https://gen.pollinations.ai/image/"
|
| 806 |
encoded_prompt = urllib.parse.quote((prompt_text or "").strip(), safe="")
|
| 807 |
params: dict[str, str] = {
|
|
|
|
| 808 |
"width": str(int(os.environ.get("POLLINATIONS_WIDTH") or "768")),
|
| 809 |
"height": str(int(os.environ.get("POLLINATIONS_HEIGHT") or "768")),
|
| 810 |
# Keep defaults conservative; can be tuned later.
|
|
|
|
| 819 |
elif api_key.startswith("pk_"):
|
| 820 |
params["key"] = api_key
|
| 821 |
|
| 822 |
+
# Pollinations has changed valid model names over time.
|
| 823 |
+
# Allow overriding via env, otherwise try a small set of known-good options.
|
| 824 |
+
preferred_model = (os.environ.get("POLLINATIONS_MODEL") or "").strip()
|
| 825 |
+
model_candidates = [
|
| 826 |
+
preferred_model,
|
| 827 |
+
# Common/cheap defaults (as of 2026):
|
| 828 |
+
"klein",
|
| 829 |
+
"zimage",
|
| 830 |
+
"flux",
|
| 831 |
+
"klein-large",
|
| 832 |
+
"gptimage"
|
| 833 |
+
]
|
| 834 |
+
seen: set[str] = set()
|
| 835 |
+
models: list[str] = []
|
| 836 |
+
for m in model_candidates:
|
| 837 |
+
m = (m or "").strip()
|
| 838 |
+
if not m or m in seen:
|
| 839 |
+
continue
|
| 840 |
+
seen.add(m)
|
| 841 |
+
models.append(m)
|
| 842 |
+
if not models:
|
| 843 |
+
models = ["flux"]
|
| 844 |
+
|
| 845 |
+
last_err: Exception | None = None
|
| 846 |
+
for model_name in models:
|
| 847 |
+
params["model"] = model_name
|
| 848 |
+
url = base + encoded_prompt + "?" + urllib.parse.urlencode(params)
|
| 849 |
+
req = urllib.request.Request(url, headers=headers, method="GET")
|
| 850 |
try:
|
| 851 |
+
with urllib.request.urlopen(req, timeout=60) as resp:
|
| 852 |
+
data = resp.read()
|
| 853 |
+
# Sometimes errors come back as JSON; treat that as failure so we can fallback.
|
| 854 |
+
if data[:1] == b"{" or b"application/json" in ("".join(resp.headers.get_all("content-type") or [])).encode("utf-8"):
|
| 855 |
+
try:
|
| 856 |
+
body = data.decode("utf-8", errors="ignore")
|
| 857 |
+
except Exception:
|
| 858 |
+
body = ""
|
| 859 |
+
raise RuntimeError(f"Pollinations non-image response for model={model_name}: {body[:300]}")
|
| 860 |
+
return _ensure_png_bytes(data)
|
| 861 |
+
except urllib.error.HTTPError as e:
|
| 862 |
+
body = ""
|
| 863 |
+
try:
|
| 864 |
+
body = e.read().decode("utf-8", errors="ignore")
|
| 865 |
+
except Exception:
|
| 866 |
+
body = ""
|
| 867 |
+
# If Pollinations rejects this model, try the next one.
|
| 868 |
+
if getattr(e, "code", None) in (400, 422) and "model" in body and "Invalid option" in body:
|
| 869 |
+
last_err = RuntimeError(f"Pollinations rejected model={model_name}: {body[:200]}")
|
| 870 |
+
continue
|
| 871 |
+
raise RuntimeError(f"Pollinations HTTP {getattr(e, 'code', '?')}: {body[:300]}")
|
| 872 |
+
except Exception as e:
|
| 873 |
+
last_err = e
|
| 874 |
+
continue
|
| 875 |
+
|
| 876 |
+
raise last_err or RuntimeError("Pollinations image generation failed")
|
| 877 |
|
| 878 |
image_bytes: bytes | None = None
|
| 879 |
hf_error: Exception | None = None
|
|
|
|
| 1518 |
head=HEAD,
|
| 1519 |
allowed_paths=[os.path.abspath("frontend")]
|
| 1520 |
)
|
| 1521 |
+
|