Spaces:
No application file
No application file
Commit ·
3669f09
0
Parent(s):
Ignore images folder
Browse files- .gitattributes +40 -0
- .gitignore +13 -0
- .python-version +1 -0
- README.md +11 -0
- pyproject.toml +22 -0
- src/__init__.py +0 -0
- src/_agents.py +42 -0
- src/main.py +92 -0
- src/model.py +35 -0
- src/tools.py +0 -0
- src/utils/image-generation.py +206 -0
- uv.lock +0 -0
.gitattributes
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
*.jpg filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
| 38 |
+
*.gif filter=lfs diff=lfs merge=lfs -text
|
| 39 |
+
*.webp filter=lfs diff=lfs merge=lfs -text
|
| 40 |
+
*.png filter=lfs diff=lfs merge=lfs -text
|
.gitignore
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Python-generated files
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[oc]
|
| 4 |
+
build/
|
| 5 |
+
dist/
|
| 6 |
+
wheels/
|
| 7 |
+
*.egg-info
|
| 8 |
+
|
| 9 |
+
# Virtual environments
|
| 10 |
+
.venv
|
| 11 |
+
.env
|
| 12 |
+
|
| 13 |
+
images/
|
.python-version
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
3.11
|
README.md
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Smgp
|
| 3 |
+
emoji: 🌍
|
| 4 |
+
colorFrom: red
|
| 5 |
+
colorTo: pink
|
| 6 |
+
sdk: docker
|
| 7 |
+
pinned: false
|
| 8 |
+
short_description: social media content generator
|
| 9 |
+
---
|
| 10 |
+
|
| 11 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
pyproject.toml
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[project]
|
| 2 |
+
name = "smpg"
|
| 3 |
+
version = "0.1.0"
|
| 4 |
+
description = "Add your description here"
|
| 5 |
+
readme = "README.md"
|
| 6 |
+
requires-python = ">=3.11"
|
| 7 |
+
dependencies = [
|
| 8 |
+
"google>=3.0.0",
|
| 9 |
+
"google-genai>=1.31.0",
|
| 10 |
+
"huggingface-hub>=0.34.4",
|
| 11 |
+
"langfuse>=3.3.4",
|
| 12 |
+
"nest-asyncio>=1.6.0",
|
| 13 |
+
"openai-agents>=0.2.8",
|
| 14 |
+
"pillow>=11.3.0",
|
| 15 |
+
"pydantic>=2.11.7",
|
| 16 |
+
"pydantic-ai[logfire]>=1.0.1",
|
| 17 |
+
]
|
| 18 |
+
|
| 19 |
+
[tool.setuptools.packages.find]
|
| 20 |
+
where = ["src"]
|
| 21 |
+
|
| 22 |
+
|
src/__init__.py
ADDED
|
File without changes
|
src/_agents.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from agents import Agent, RunContextWrapper
|
| 2 |
+
from model import get_model
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
#----------------------- content agent ----------------------------------
|
| 6 |
+
|
| 7 |
+
def content_prompt(context: RunContextWrapper, agent: Agent) -> str:
    """Return the system prompt for the LinkedIn content-writer agent.

    This function is passed as a dynamic ``instructions`` callable to
    ``Agent``; the SDK invokes it with the run context and the agent and
    uses the returned string as the system prompt.

    Bug fix: the original body contained only a docstring and therefore
    implicitly returned ``None``, so the agent received no instructions
    at all. The prompt text is now returned explicitly.

    Args:
        context: Wrapper around the current run context (unused here).
        agent: The agent the prompt is being built for (unused here).

    Returns:
        The system prompt text.
    """
    return (
        "You are a LinkedIn Content Writer AI.\n"
        "Your task is to take a user's description of an idea, event, or announcement "
        "and turn it into engaging LinkedIn post text.\n"
        "\n"
        "Guidelines:\n"
        "- Start with a strong hook or headline (1 line).\n"
        "- Write a clear and engaging body (2-5 short sentences).\n"
        "- Use a professional but approachable tone suited for LinkedIn.\n"
        "- Add 3-6 relevant hashtags.\n"
        "- End with a call-to-action (CTA), e.g., “Let's connect”, “Check out the details”, “Join the discussion”.\n"
        "- Keep sentences concise and scannable for social media.\n"
    )
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
# Agent that turns a user's idea into LinkedIn post copy.
# Instructions are supplied dynamically via the content_prompt callable.
content_agent = Agent(
    name="content_agent",
    instructions=content_prompt,
    model=get_model('gemini-2.0-flash'),
)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
#----------------------- content agent ------------------------------------
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
#----------------------- Image agent -------------------------------------------
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
|
src/main.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
import asyncio
from dataclasses import dataclass
from agents import set_tracing_disabled, Runner, SQLiteSession
from _agents import content_agent
from pydantic import BaseModel, Field, ConfigDict
from typing import Optional
import nest_asyncio
import logfire
from langfuse import get_client


# set_tracing_disabled(disabled=True)

# Allow asyncio.run() inside environments that already have a running
# event loop (e.g. notebooks / HF Spaces).
nest_asyncio.apply()


# Local-only observability: instrument the openai-agents SDK but do not
# ship telemetry to the Logfire cloud (send_to_logfire=False).
logfire.configure(
    service_name='smpg_agent',
    send_to_logfire=False,
)

logfire.instrument_openai_agents()


# Langfuse reads its credentials/host from the environment (.env).
langfuse = get_client()

# Verify connection
if langfuse.auth_check():
    print("Langfuse client is authenticated and ready!")
else:
    print("Authentication failed. Please check your credentials and host.")
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class AgentContext(BaseModel):
    """Shared state carried through the agent run.

    Holds the raw user request going in and the generated post text
    coming out.
    """

    # Raw request text supplied by the user.
    user_input: Optional[str] = Field(default=None, description="The input provided by the user to the agent.")
    # Final post text produced by the content agent.
    post_content: Optional[str] = Field(default=None, description="The generated content for the post.")

    # Reject unexpected fields so typos surface as validation errors.
    model_config = ConfigDict(extra='forbid')
|
| 45 |
+
|
| 46 |
+
async def main():
    """Run the content agent once against a hard-coded sample request.

    Creates a persistent SQLite-backed conversation session, runs
    ``content_agent`` on a sample LinkedIn-post request, stores the
    result back on the shared ``AgentContext``, and prints it.
    """
    print("Hello from smpg!")

    # Persist conversation history between turns in a local SQLite database.
    session = SQLiteSession("conversation_123")

    agent_context = AgentContext(
        user_input="Write a LinkedIn post about the importance of AI in modern business strategies."
    )

    # NOTE: a large commented-out sample "translation orchestrator" from the
    # SDK docs previously lived here; removed as dead code unrelated to this app.

    # max_turns bounds the agent loop so a misbehaving run cannot spin forever.
    result = await Runner.run(
        content_agent,
        context=agent_context,
        input=agent_context.user_input,
        session=session,
        max_turns=5,
    )

    agent_context.post_content = result.final_output
    print("Generated Post Content:")
    print(agent_context.post_content)


if __name__ == "__main__":
    asyncio.run(main())
|
src/model.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from agents import OpenAIChatCompletionsModel, AsyncOpenAI
from dotenv import load_dotenv, find_dotenv
import os

# Load API keys from a local .env file (no-op if the file is absent).
_: bool = load_dotenv(find_dotenv())


# deepseek_api_key = os.getenv("DEEPSEEK_API_KEY")
google_api_key = os.getenv("GOOGLE_API_KEY")
# grok_api_key = os.getenv("GROK_API_KEY")
openrouter_api_key = os.getenv("OPENROUTER_API_KEY")

# OpenAI-compatible base URLs, one per provider.
DEEPSEEK_BASE_URL = "https://api.deepseek.com/v1"
GROK_BASE_URL = "https://api.x.ai/v1"
GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai/"
OPENROUTER_BASE_URL = "https://openrouter.ai/api/v1"


# Shared async clients (one per provider); only OpenRouter and Gemini are
# active, the others are kept as commented placeholders.
openrouter_client = AsyncOpenAI(base_url=OPENROUTER_BASE_URL, api_key=openrouter_api_key)
# deepseek_client = AsyncOpenAI(base_url=DEEPSEEK_BASE_URL, api_key=deepseek_api_key)
# grok_client = AsyncOpenAI(base_url=GROK_BASE_URL, api_key=grok_api_key)
gemini_client = AsyncOpenAI(base_url=GEMINI_BASE_URL, api_key=google_api_key)
|
| 24 |
+
|
| 25 |
+
def get_model(model_name: str):
    """Resolve a model name to a provider-backed chat-completions model.

    Routing rules:
      * names containing ``/`` are OpenRouter model ids (e.g. "meta-llama/...")
      * names containing ``gemini`` go to Google's OpenAI-compatible endpoint
      * anything else is returned unchanged for the SDK's default resolution

    Args:
        model_name: Provider-qualified or plain model identifier.

    Returns:
        An ``OpenAIChatCompletionsModel`` bound to the matching client, or
        the original name string when no routing rule matches.
    """
    if "/" in model_name:
        return OpenAIChatCompletionsModel(model=model_name, openai_client=openrouter_client)
    if "gemini" in model_name:
        return OpenAIChatCompletionsModel(model=model_name, openai_client=gemini_client)
    # No known provider marker: hand the raw name back to the caller.
    return model_name
|
src/tools.py
ADDED
|
File without changes
|
src/utils/image-generation.py
ADDED
|
@@ -0,0 +1,206 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
import re
from google import genai
from huggingface_hub import InferenceClient
from dotenv import load_dotenv, find_dotenv

# Load GOOGLE_API_KEY / HF_TOKEN from a local .env file (no-op if absent).
_: bool = load_dotenv(find_dotenv())


# Gemini client; credentials are picked up from the environment.
client = genai.Client(
    # vertexai=True, project='1089055075981', location='us-central1'
)


# Pixel dimensions (width, height) for each supported aspect ratio.
aspect_ratios = {
    "1:1": (1328, 1328),
    "16:9": (1664, 928),
    "9:16": (928, 1664),
    "4:3": (1472, 1140),
    "3:4": (1140, 1472),
    "3:2": (1584, 1056),
    "2:3": (1056, 1584),
}

# Hugging Face inference client routed through the Replicate provider.
hf_client = InferenceClient(
    provider="replicate",
    api_key=os.environ["HF_TOKEN"],
)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def generate_json_from_idea(idea: str, defaultValues:str) -> str:
    """Generates a detailed JSON prompt from a simple idea using a Gemini model.

    Args:
        idea: Short free-form description of the desired image.
        defaultValues: Pre-set values (e.g. "aspect_ratio:1:1") injected into
            the prompt so the model carries them into the JSON it writes.

    Returns:
        The model's raw text response — expected (but not guaranteed) to be
        a bare JSON object matching the schema below.
    """

    # The prompt template includes the desired JSON schema structure.
    # NOTE: the // annotations are part of the prompt text sent to the model,
    # not Python comments.
    schema_template = """Convert the attached user idea into a detailed JSON object for generating an image. The output should only be the raw JSON object, without any markdown formatting like ```json ... ```.

Idea: "{idea}"

defaultValues:{defaultValues}

Schema:
{{
"meta": {{
"styleName": "...", // A unique, descriptive name for this specific image style or preset (e.g., "Ethereal Forest Magic", "Cyberpunk Noir Alley").
"aspectRatio": "...", // The proportional relationship between the width and height of the image (e.g., "16:9", "1:1", "4:5", "21:9").
"promptPrefix": "..." // Optional text to prepend to a generated prompt, like a file name, a version number, or a specific trigger word.
}},
"camera": {{
"model": "...", // Describes the camera, lens, or artistic medium used (e.g., "DSLR", "iPhone 15 Pro", "8x10 view camera", "Watercolor on cold-press paper", "3D render in Blender").
"focalLength": "...", // The lens's focal length, which affects the field of view and perspective distortion (e.g., "16mm wide-angle", "85mm portrait", "200mm telephoto", "Isometric perspective").
"angle": "...", // The camera's angle relative to the main subject or scene (e.g., "eye-level", "high-angle", "dutch angle", "drone shot", "worm's-eye view").
"type": "..." // The genre or type of photography or art style (e.g., "macro photography", "landscape", "fantasy illustration", "architectural rendering", "abstract art").
}},
"subject": {{
"primary": "...", // The main focal point or subject of the image (e.g., "a majestic mountain range", "a lone wolf", "an ancient wizard", "a futuristic cityscape", "an abstract shape").
"emotion": "...", // The dominant emotion or mood conveyed by the subject or the overall scene (e.g., "serene and peaceful", "joyful", "melancholy", "menacing", "awe-inspiring").
"pose": "...", // The posture, action, or arrangement of the subject(s) (e.g., "running towards the camera", "sitting in quiet contemplation", "a winding river", "a chaotic explosion").
"gaze": "..." // The direction of the subject's gaze or the directional focus of the composition (e.g., "looking off-camera", "breaking the fourth wall", "facing away from the viewer", "pointing towards the horizon").
}},
"character": {{
"appearance": "...", // Detailed physical description of a character or key object (e.g., "weathered face with a long white beard", "sleek, chrome-plated robot", "moss-covered ancient tree").
"wardrobe": "...", // Clothing, armor, or any form of covering on the subject (e.g., "ornate golden armor", "tattered rags", "a vibrant kimono", "a car's glossy paint job").
"accessories": "..." // Additional items worn by or associated with the subject (e.g., "a magical amulet", "cybernetic implants", "a pair of glasses", "a sword and shield").
}},
"composition": {{
"theory": "...", // The compositional rules or theories applied (e.g., "rule of thirds", "golden ratio", "leading lines", "symmetrical balance", "negative space").
"visualHierarchy": "..." // Describes the order in which the viewer's eye is drawn to different elements in the scene, from most to least prominent.
}},
"setting": {{
"environment": "...", // The general environment or location of the scene (e.g., "a mystical forest", "a bustling cyberpunk city", "a tranquil beach at sunset", "a minimalist white room", "the surface of Mars").
"architecture": "...", // Describes any buildings, ruins, or significant natural structures (e.g., "gothic cathedrals", "brutalist architecture", "alien monoliths", "towering rock formations").
"furniture": "..." // Key objects, props, or furniture within the setting that add context or detail (e.g., "a single throne", "scattered futuristic debris", "a rustic wooden fence").
}},
"lighting": {{
"source": "...", // The primary source of light in the scene (e.g., "dramatic moonlight", "soft window light", "flickering candlelight", "neon signs", "magical glow").
"direction": "...", // The direction from which the light originates (e.g., "backlighting", "rim lighting", "top-down light", "light from below").
"quality": "..." // The quality and characteristics of the light and shadows (e.g., "soft and diffused", "hard and high-contrast", "dappled", "volumetric light rays", "caustic reflections").
}},
"style": {{
"artDirection": "...", // The overarching artistic style, movement, or influence (e.g., "impressionism", "art deco", "cyberpunk", "vaporwave", "ghibli-inspired", "cinematic").
"mood": "..." // The overall mood, feeling, or atmosphere of the image (e.g., "ethereal and dreamy", "dystopian and gritty", "whimsical and cheerful", "epic and dramatic").
}},
"rendering": {{
"engine": "...", // The rendering engine, technique, or medium used to create the final image (e.g., "Octane Render", "oil painting", "cross-hatching", "pixel art", "Unreal Engine 5").
"fidelitySpec": "...", // Specific details about the image's texture and fidelity (e.g., "heavy film grain", "sharp digital focus", "visible brushstrokes", "chromatic aberration", "lens flare").
"postProcessing": "..." // Any post-processing or finishing effects applied (e.g., "color grading with a teal and orange look", "vignette", "bloom and glare", "a vintage photo filter").
}},
"colorPalette": {{
"primaryColors": [ // The most dominant colors that define the overall color scheme of the image.
{{ "name": "...", "hex": "...", "percentage": "..." }},
{{ "name": "...", "hex": "...", "percentage": "..." }}
],
"accentColors": [ // Complementary or contrasting colors used for emphasis, detail, or highlights.
{{ "name": "...", "hex": "...", "percentage": "..." }},
{{ "name": "...", "hex": "...", "percentage": "..." }}
]
}}
}}
"""
    prompt = schema_template.format(idea=idea,defaultValues=defaultValues)

    response = client.models.generate_content(
        model="gemini-2.5-pro",
        contents=prompt,
    )

    print(response.text)
    return response.text
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def generate_images(
    idea: str,
    prompt: str,
    aspectRatio: str = "1:1",  # "1:1", "3:4", "4:3", "9:16", and "16:9".
    output_dir: str = "images",
    num_images: int = 1,
):
    """Generates images using Imagen and saves them to the output directory.

    Args:
        idea: Short idea text; its first 30 chars become the file-name stem.
        prompt: Full prompt sent to the text-to-image model.
        aspectRatio: Key into ``aspect_ratios``; raises KeyError for an
            unsupported value (caught by the blanket handler below).
        output_dir: Directory where the image and prompt JSON are written;
            assumed to already exist — TODO confirm callers create it.
        num_images: NOTE(review): currently unused by the HF call — exactly
            one image is produced per invocation; the commented-out Imagen
            path below did honor it.
    """

    try:
        print(prompt)
        print(f"Generating {num_images} images for {idea}")
        # Earlier Imagen-based implementation, kept for reference:
        # response = client.models.generate_images(
        #     model="imagen-4.0-generate-preview-06-06",
        #     prompt=prompt,
        #     config=genai.types.GenerateImagesConfig(
        #         number_of_images=num_images,
        #         aspect_ratio=aspectRatio,
        #     ),
        # )

        # Map the ratio to concrete pixel dimensions for the HF endpoint.
        width, height = aspect_ratios[aspectRatio]
        response = hf_client.text_to_image(
            prompt=prompt,
            model="Qwen/Qwen-Image",
            width=width,
            height=height,

        )
        print(response)

        # Slug from the idea: keep alphanumerics/spaces, lower-case, dashes.
        clean_idea = (re.sub(r"[^a-zA-Z0-9\s]", "", idea[:30]).lower().replace(" ", "-"))
        # NOTE(review): trailing "-" before ".png" is a leftover from the
        # multi-image "-{i+1}" naming in the commented loop below.
        image_path = os.path.join(output_dir, f"{clean_idea}-.png")
        response.save(image_path)
        # Persist the prompt alongside the image for reproducibility.
        with open(os.path.join(output_dir, f"{clean_idea}.json"), "w") as f:
            f.write(prompt)
        print(f"Saved image and prompt to {image_path}")

        # Multi-image save loop used by the Imagen path, kept for reference:
        # for i, generated_image in enumerate(response):
        #     clean_idea = (
        #         re.sub(r"[^a-zA-Z0-9\s]", "", idea[:30]).lower().replace(" ", "-")
        #     )
        #     image_path = os.path.join(output_dir, f"{clean_idea}-{i+1}.png")
        #     generated_image.image.save(image_path)
        #     with open(os.path.join(output_dir, f"{clean_idea}.json"), "w") as f:
        #         f.write(prompt)
        #     print(f"Saved image and prompt to {image_path}")

    except Exception as e:
        # Best-effort: log and swallow so a single failed idea does not
        # abort a batch run.
        print(f"An error occurred during image generation: {e}")
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
def generate(
    idea: str,
    output_dir: str = "images",
    aspectRatio: str = "1:1",
    num_images: int = 1,
):
    """Orchestrates the process of generating JSON and then generating images.

    Expands the idea into a structured JSON prompt via Gemini, then feeds
    idea + JSON to the text-to-image backend.

    Args:
        idea: Short free-form description of the desired image.
        output_dir: Directory for outputs; created if missing.
        aspectRatio: Aspect-ratio key (must exist in ``aspect_ratios``).
        num_images: Requested image count, forwarded to ``generate_images``.
    """
    os.makedirs(output_dir, exist_ok=True)

    # Let the JSON-expansion step know the chosen ratio up front.
    ideaJson = generate_json_from_idea(idea, f"aspect_ratio:{aspectRatio}")

    generate_images(
        idea=idea,
        prompt=f"""
{idea}

<MetaSchemaForImageGeneration>
{ideaJson}
</MetaSchemaForImageGeneration>
""",
        # Bug fix: was hard-coded to "1:1", silently ignoring the caller's
        # aspectRatio argument.
        aspectRatio=aspectRatio,
        output_dir=output_dir,
        num_images=num_images,
    )
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
if __name__ == "__main__":
    # Batch of test prompts; commented entries are ready-made alternatives.
    ideas = [
        # "A energy drink with water drops on it, ultra realistic, for a commercial.",
        # "Graffiti with the text 'JSON Schema' on a brick wall.",
        # "A LEGO knight fighting a huge, fire-breathing dragon on a castle wall.",
        # "A stylish woman sipping coffee at a Parisian cafe, with the Eiffel Tower in the background. Shot in golden hour.",
        # "An emotional, close-up portrait of an old fisherman.",
        # "A vast, alien landscape on a distant planet with two suns, strange, towering rock formations, and bioluminescent plants. Epic sci-fi concept art.",
        # "A whimsical illustration of a friendly fox reading a book in a cozy, cluttered library. The text 'The Midnight Reader' should be subtly integrated on a book spine.",
        # "A magical man with sparkling pink hair and large from an anime.",
        # "A cartoon robot waving happily, with a simple, bold outline and bright, flat colors. ",
        # "A full-body character sheet of a realistic pirate captain, showing front, back, and side views.",
        """An AI brain made of digital circuits seamlessly integrated into a modern office skyline, representing artificial intelligence as essential for business growth and innovation"""
    ]

    # Generate one image set per idea, square format.
    for idea in ideas:
        generate(idea=idea,aspectRatio="1:1")
|
uv.lock
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|