fahmiaziz98 commited on
Commit
1061354
·
1 Parent(s): 07338e1
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. Dockerfile +59 -8
  2. app.py +0 -8
  3. pyproject.toml +16 -0
  4. requirements.txt +7 -3
  5. src/__init__.py +0 -0
  6. src/__pycache__/__init__.cpython-312.pyc +0 -0
  7. src/__pycache__/config.cpython-312.pyc +0 -0
  8. src/__pycache__/main.cpython-312.pyc +0 -0
  9. src/api/__init__.py +0 -0
  10. src/api/__pycache__/__init__.cpython-312.pyc +0 -0
  11. src/api/__pycache__/dependency.cpython-312.pyc +0 -0
  12. src/api/__pycache__/router.cpython-312.pyc +0 -0
  13. src/api/dependency.py +0 -0
  14. src/api/v1/__init__.py +0 -0
  15. src/api/v1/__pycache__/__init__.cpython-312.pyc +0 -0
  16. src/api/v1/__pycache__/ad_routers.cpython-312.pyc +0 -0
  17. src/api/v1/__pycache__/imagen_router.cpython-312.pyc +0 -0
  18. src/api/v1/ad_routers.py +94 -0
  19. src/api/v1/imagen_router.py +99 -0
  20. src/config/__init__.py +3 -0
  21. src/config/__pycache__/__init__.cpython-312.pyc +0 -0
  22. src/config/__pycache__/settings.cpython-312.pyc +0 -0
  23. src/config/settings.py +80 -0
  24. src/core/__init__.py +0 -0
  25. src/core/__pycache__/__init__.cpython-312.pyc +0 -0
  26. src/core/__pycache__/ad_generator.cpython-312.pyc +0 -0
  27. src/core/__pycache__/base.cpython-312.pyc +0 -0
  28. src/core/__pycache__/image_generator.cpython-312.pyc +0 -0
  29. src/core/__pycache__/llm.cpython-312.pyc +0 -0
  30. src/core/__pycache__/prompt_templates.cpython-312.pyc +0 -0
  31. src/core/ad_generator.py +149 -0
  32. src/core/base.py +17 -0
  33. src/core/image_generator.py +64 -0
  34. src/llm/__init__.py +0 -0
  35. src/llm/__pycache__/__init__.cpython-312.pyc +0 -0
  36. src/llm/__pycache__/base.cpython-312.pyc +0 -0
  37. src/llm/__pycache__/gemini_client.cpython-312.pyc +0 -0
  38. src/llm/__pycache__/openai_client.cpython-312.pyc +0 -0
  39. src/llm/base.py +44 -0
  40. src/llm/gemini_client.py +69 -0
  41. src/llm/openai_client.py +104 -0
  42. src/main.py +40 -0
  43. src/models/__init__.py +9 -0
  44. src/models/__pycache__/__init__.cpython-312.pyc +0 -0
  45. src/models/__pycache__/ad.cpython-312.pyc +0 -0
  46. src/models/__pycache__/requests.cpython-312.pyc +0 -0
  47. src/models/__pycache__/response.cpython-312.pyc +0 -0
  48. src/models/requests.py +44 -0
  49. src/models/response.py +43 -0
  50. src/prompts/__init__.py +0 -0
Dockerfile CHANGED
@@ -1,17 +1,68 @@
1
- # Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
2
- # you will also find guides on how best to write your Dockerfile
 
3
 
4
- FROM python:3.9
 
 
5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
  RUN useradd -m -u 1000 user
 
7
  USER user
8
- ENV PATH="/home/user/.local/bin:$PATH"
 
 
 
9
 
10
  WORKDIR /app
11
 
12
- COPY --chown=user ./requirements.txt requirements.txt
13
- RUN pip install --no-cache-dir --upgrade -r requirements.txt
 
 
 
 
 
14
 
15
- COPY --chown=user . /app
16
- CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
 
17
 
 
 
 
1
+ # backend/Dockerfile.pip
2
+ # === BUILD STAGE ===
3
+ FROM python:3.10-slim AS build
4
 
5
+ ENV PIP_NO_CACHE_DIR=1 \
6
+ PYTHONUNBUFFERED=1 \
7
+ PATH="/home/user/.local/bin:$PATH"
8
 
9
+ # Install system dependencies for building
10
+ RUN apt-get update && apt-get install -y \
11
+ build-essential \
12
+ libjpeg-dev \
13
+ zlib1g-dev \
14
+ libpng-dev \
15
+ libffi-dev \
16
+ && rm -rf /var/lib/apt/lists/*
17
+
18
+ # Create user
19
+ RUN useradd -m -u 1000 user
20
+
21
+ USER user
22
+ WORKDIR /app
23
+
24
+ # Copy requirements file
25
+ COPY --chown=user requirements.txt ./
26
+
27
+ # Install dependencies using PIP
28
+ RUN pip install --no-cache-dir --user -r requirements.txt
29
+
30
+ # === FINAL STAGE ===
31
+ FROM python:3.10-slim
32
+
33
+ # Install runtime dependencies only
34
+ RUN apt-get update && apt-get install -y \
35
+ libjpeg-dev \
36
+ zlib1g-dev \
37
+ libpng-dev \
38
+ libffi-dev \
39
+ curl \
40
+ && apt-get autoremove -y \
41
+ && apt-get clean -y \
42
+ && rm -rf /var/lib/apt/lists/*
43
+
44
+ # Create user
45
  RUN useradd -m -u 1000 user
46
+
47
  USER user
48
+
49
+ ENV PATH="/home/user/.local/bin:$PATH" \
50
+ PYTHONUNBUFFERED=1 \
51
+ PIP_NO_CACHE_DIR=1
52
 
53
  WORKDIR /app
54
 
55
+ # Copy installed packages from builder
56
+ COPY --from=build /home/user/.local /home/user/.local
57
+
58
+ # Copy application code
59
+ COPY --chown=user . .
60
+
61
+ EXPOSE 7860
62
 
63
+ # Health check
64
+ HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 \
65
+ CMD curl -f http://localhost:7860/healthcheck || exit 1
66
 
67
+ # Production command
68
+ CMD ["uvicorn", "src.main:app", "--host", "0.0.0.0", "--port", "7860"]
app.py DELETED
@@ -1,8 +0,0 @@
1
- from fastapi import FastAPI
2
-
3
- app = FastAPI()
4
-
5
- @app.get("/")
6
- def greet_json():
7
- return {"Hello": "World!"}
8
-
 
 
 
 
 
 
 
 
 
pyproject.toml ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "server"
3
+ version = "0.1.0"
4
+ description = "Add your description here"
5
+ readme = "README.md"
6
+ requires-python = ">=3.12"
7
+ dependencies = [
8
+ "fastapi[standard]==0.116.1",
9
+ "google-genai==1.27.0",
10
+ "loguru==0.7.3",
11
+ "openai==1.97.0",
12
+ "pillow==11.3.0",
13
+ "psutil>=7.0.0",
14
+ "pydantic-settings==2.10.1",
15
+ "python-dotenv>=1.1.1",
16
+ ]
requirements.txt CHANGED
@@ -1,3 +1,7 @@
1
- fastapi
2
- uvicorn[standard]
3
-
 
 
 
 
 
1
+ fastapi[standard]==0.116.1
2
+ google-genai==1.27.0
3
+ loguru==0.7.3
4
+ openai==1.97.0
5
+ pillow==11.3.0
6
+ pydantic-settings==2.10.1
7
+ python-dotenv>=1.1.1
src/__init__.py ADDED
File without changes
src/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (160 Bytes). View file
 
src/__pycache__/config.cpython-312.pyc ADDED
Binary file (1.44 kB). View file
 
src/__pycache__/main.cpython-312.pyc ADDED
Binary file (1.65 kB). View file
 
src/api/__init__.py ADDED
File without changes
src/api/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (164 Bytes). View file
 
src/api/__pycache__/dependency.cpython-312.pyc ADDED
Binary file (1.33 kB). View file
 
src/api/__pycache__/router.cpython-312.pyc ADDED
Binary file (2.32 kB). View file
 
src/api/dependency.py ADDED
File without changes
src/api/v1/__init__.py ADDED
File without changes
src/api/v1/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (167 Bytes). View file
 
src/api/v1/__pycache__/ad_routers.cpython-312.pyc ADDED
Binary file (3.64 kB). View file
 
src/api/v1/__pycache__/imagen_router.cpython-312.pyc ADDED
Binary file (3.57 kB). View file
 
src/api/v1/ad_routers.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from loguru import logger
3
+ from typing import Annotated
4
+ from fastapi import (
5
+ APIRouter,
6
+ HTTPException,
7
+ Depends
8
+ )
9
+ from fastapi.responses import StreamingResponse
10
+
11
+ from src.models import AdGenerationRequest, AdGenerationResponse
12
+ from src.service.ad_service import get_ad_service, AdService
13
+ from src.utils.helpers import generate_request_id
14
+ from src.config import settings
15
+
16
+
17
+ router = APIRouter(prefix=settings.API_V1_PREFIX, tags=["Advertisement Generation"])
18
+
19
@router.post(
    "/generate",
    response_model=AdGenerationResponse,
    summary="Generate Advertisement",
    description="Generate advertisement with optional image processing"
)
async def generate_ad(
    request: AdGenerationRequest,
    ad_service: Annotated[AdService, Depends(get_ad_service)],
):
    """Generate an advertisement for the product described in *request*.

    Parameters:
    - request: AdGenerationRequest with product details and ad settings.

    Returns:
    - AdGenerationResponse holding the generated ad content and metadata.
    """
    try:
        return await ad_service.generate_ad(request)
    except HTTPException:
        # Intentional HTTP errors from the service layer pass through untouched.
        raise
    except Exception as e:
        # Anything unexpected becomes a structured 500 with a traceable id.
        raise HTTPException(
            status_code=500,
            detail={
                "error": "generation_failed",
                "message": str(e),
                "request_id": generate_request_id()
            }
        )
53
+
54
@router.post(
    "/generate-stream",
    summary="Generate Advertisement (Streaming)",
    description="Generate advertisement with streaming response"
)
async def generate_ad_streaming(
    request: AdGenerationRequest,
    ad_service: Annotated[AdService, Depends(get_ad_service)],
):
    """
    Generate advertisement content with streaming response.

    Parameters:
    - request: AdGenerationRequest containing product details and ad settings.

    Returns:
    - StreamingResponse yielding newline-delimited JSON chunks; on failure a
      final error chunk is emitted instead of aborting the stream.
    """
    async def stream_response():
        try:
            async for chunk in ad_service.generate_ad_streaming(request):
                yield json.dumps(chunk, default=str) + "\n"
        except Exception as e:
            # Was logger.info(e): a stream failure is an error, log it with
            # the traceback so it is visible in monitoring.
            logger.exception(e)
            error_response = {
                "status": "error",
                "error_code": "generation_failed",
                "message": str(e),
                "request_id": generate_request_id()
            }
            yield json.dumps(error_response) + "\n"

    return StreamingResponse(
        stream_response(),
        media_type="text/plain",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "X-Accel-Buffering": "no"  # Disable nginx buffering
        }
    )
src/api/v1/imagen_router.py ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from loguru import logger
2
+ from pathlib import Path
3
+ from typing import Annotated
4
+ from fastapi import (
5
+ APIRouter,
6
+ HTTPException,
7
+ Depends
8
+ )
9
+
10
+ from fastapi.responses import FileResponse
11
+
12
+ from src.models.requests import ImageGenerationRequest
13
+ from src.models.response import ImageResult
14
+ from src.service.imagen_service import get_imagen_service, ImageService
15
+ from src.config import settings
16
+ from src.utils.helpers import generate_request_id
17
+
18
+
19
+ router = APIRouter(prefix=settings.API_V1_PREFIX, tags=["Image Generation"])
20
+
21
+ @router.post(
22
+ "/generate-image",
23
+ response_model=ImageResult,
24
+ summary="Generate Image",
25
+ description="Generate an image based on product details"
26
+ )
27
+ async def generate_image(
28
+ request: ImageGenerationRequest,
29
+ imagen_service: Annotated[ImageService, Depends(get_imagen_service)],
30
+ ):
31
+ """
32
+ Generate an image based on the provided product details.
33
+
34
+ Parameters:
35
+ - request: ImageGenerationRequest containing product details.
36
+
37
+ Returns:
38
+ - ImageResult with generated image URL and metadata.
39
+ """
40
+ try:
41
+ result = imagen_service.generate_image(request)
42
+ if result is None:
43
+ raise HTTPException(status_code=500, detail="Image generation failed")
44
+ return result
45
+
46
+ except HTTPException:
47
+ raise
48
+ except Exception as e:
49
+ raise HTTPException(
50
+ status_code=500,
51
+ detail={
52
+ "error": "image_generation_failed",
53
+ "message": str(e),
54
+ "request_id": generate_request_id()
55
+ }
56
+ )
57
+
58
@router.get(
    "/images/{file_name}",
    summary="Get Image",
    description="Retrieve an image by its file name"
)
async def get_image(file_name: str):
    """
    Retrieve an image by its file name.

    Parameters:
    - file_name: Name of the image file to retrieve.

    Returns:
    - FileResponse with the image if found, otherwise raises HTTP 404.
    """
    # NOTE: the duplicate mid-file `from pathlib import Path` was removed;
    # Path is already imported at the top of this module.
    logger.info(f"Retrieving image: {file_name}")
    try:
        upload_dir = Path(settings.UPLOAD_DIR).resolve()
        # Resolve the requested path and verify it is still inside the upload
        # directory so a crafted file_name (e.g. "../.env") cannot escape it.
        file_path = (upload_dir / file_name).resolve()
        if upload_dir not in file_path.parents:
            logger.warning(f"Rejected path outside upload dir: {file_name}")
            raise HTTPException(status_code=404, detail="Image not found")

        if not file_path.is_file():
            logger.warning(f"Image not found: {file_name}")
            raise HTTPException(status_code=404, detail="Image not found")

        logger.info(f"Image retrieved successfully: {file_name}")
        return FileResponse(
            path=file_path,
            media_type="image/png",
            filename=file_name
        )
    except HTTPException:
        # Re-raise as-is: previously the broad handler below swallowed the
        # intended 404 and converted it into a 500.
        raise
    except Exception as e:
        logger.critical(f"Critical error during image retrieval: {e}")
        raise HTTPException(
            status_code=500,
            detail={
                "error": "image_retrieval_failed",
                "message": str(e),
                "request_id": generate_request_id()
            }
        )
+ )
src/config/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ from .settings import settings
2
+
3
+ __all__ = ["settings"]
src/config/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (228 Bytes). View file
 
src/config/__pycache__/settings.cpython-312.pyc ADDED
Binary file (3.05 kB). View file
 
src/config/settings.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from pathlib import Path
from functools import lru_cache
from pydantic_settings import BaseSettings, SettingsConfigDict
from dotenv import load_dotenv, find_dotenv

# Load .env into the process environment before Settings is evaluated.
_ = load_dotenv(find_dotenv())


class Settings(BaseSettings):
    """Central application configuration, read from the environment / .env."""

    # App Configuration
    APP_NAME: str = "Ad Generator MVP"
    APP_VERSION: str = "1.0.0"
    APP_DESCRIPTION: str = "A minimal viable product for ad generation using LLMs."
    DEBUG: bool = False
    HOST: str = "0.0.0.0"
    PORT: int = 8000

    # API Configuration
    API_V1_PREFIX: str = "/api/v1"
    CORS_ORIGINS: list[str] = ["*"]

    # File Upload Configuration
    UPLOAD_DIR: str = "uploads"
    MAX_FILE_SIZE: int = 5 * 1024 * 1024  # 5MB
    ALLOWED_FILE_EXTENSIONS: set[str] = {".jpg", ".jpeg", ".png", ".webp"}
    ALLOWED_IMAGE_TYPES: set[str] = {"image/jpeg", "image/png", "image/webp"}

    # LLM settings (LUNOS_API_KEY is required — no default)
    LUNOS_API_KEY: str
    LUNOS_BASE_URL: str = "https://api.lunos.tech/v1"
    DEFAULT_MODEL_NAME: str = "google/gemma-3-12b-it"

    # Gemini Image Generation settings (GEMINI_API_KEY is required)
    GEMINI_API_KEY: str
    GEMINI_IMAGE_MODEL_NAME: str = "gemini-2.0-flash-preview-image-generation"

    # Safety settings forwarded to the Gemini API
    SAFETY_SETTINGS: list[dict] = [
        {
            "category": "HARM_CATEGORY_HARASSMENT",
            "threshold": "BLOCK_MEDIUM_AND_ABOVE"
        },
        {
            "category": "HARM_CATEGORY_HATE_SPEECH",
            "threshold": "BLOCK_MEDIUM_AND_ABOVE"
        },
        {
            "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
            "threshold": "BLOCK_MEDIUM_AND_ABOVE"
        },
        {
            "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
            "threshold": "BLOCK_MEDIUM_AND_ABOVE"
        }
    ]

    # Rate Limiting
    RATE_LIMIT_REQUESTS: int = 100
    RATE_LIMIT_WINDOW: int = 3600  # 1 hour in seconds

    # Storage Configuration
    STORAGE_TYPE: str = "local"  # local, s3, cloudinary

    # pydantic-settings v2 configuration — replaces the deprecated pydantic v1
    # inner `class Config` style used previously.
    model_config = SettingsConfigDict(
        env_file=".env",
        env_file_encoding="utf-8",
        case_sensitive=True,
    )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Create the upload directory if missing. parents=True matches the
        # mkdir call in ImageGenerator and tolerates nested UPLOAD_DIR paths.
        Path(self.UPLOAD_DIR).mkdir(parents=True, exist_ok=True)


@lru_cache()
def get_settings() -> Settings:
    """Return a cached singleton Settings instance."""
    return Settings()


settings = get_settings()
src/core/__init__.py ADDED
File without changes
src/core/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (165 Bytes). View file
 
src/core/__pycache__/ad_generator.cpython-312.pyc ADDED
Binary file (6.6 kB). View file
 
src/core/__pycache__/base.cpython-312.pyc ADDED
Binary file (1.15 kB). View file
 
src/core/__pycache__/image_generator.cpython-312.pyc ADDED
Binary file (3.57 kB). View file
 
src/core/__pycache__/llm.cpython-312.pyc ADDED
Binary file (1.77 kB). View file
 
src/core/__pycache__/prompt_templates.cpython-312.pyc ADDED
Binary file (5.91 kB). View file
 
src/core/ad_generator.py ADDED
@@ -0,0 +1,149 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from time import time
2
+ from typing import Any, Dict, AsyncIterator
3
+ from src.core.base import BaseAdGenerator
4
+ from src.models import (
5
+ AdGenerationRequest,
6
+ AdGenerationResponse,
7
+ ProductInfo,
8
+ AdSettings
9
+ )
10
+ from src.llm.openai_client import OpenAIClient
11
+ from src.prompts.templates import FlexibleAdPromptGenerator
12
+ from src.utils.helpers import generate_request_id
13
+
14
+
15
+ class AIAdGenerator(BaseAdGenerator):
16
+ """
17
+ AIAdGenerator is responsible for generating advertisements using an LLM client.
18
+ It utilizes a flexible prompt generator to create prompts based on the ad type and tone specified in the request.
19
+ The generator can handle both standard and streaming responses, providing detailed ad content along with product information and settings.
20
+ """
21
+
22
+ def __init__(self) -> None:
23
+ """
24
+ Initializes the AIAdGenerator with an OpenAI client and a flexible prompt generator.
25
+ """
26
+ self.llm = OpenAIClient()
27
+ self.prompt = FlexibleAdPromptGenerator()
28
+
29
+ async def generate(self, request: AdGenerationRequest, **kwargs) -> AdGenerationResponse:
30
+ """
31
+ Generates an advertisement based on the provided request.
32
+ Parameters:
33
+ - request: AdGenerationRequest containing product details and ad settings.
34
+ Returns:
35
+ - AdGenerationResponse with generated ad content and metadata.
36
+ """
37
+ start = time()
38
+ identifier = generate_request_id()
39
+
40
+ try:
41
+ system_prompt = self.prompt.generate_prompt(
42
+ ad_type=request.ad_type,
43
+ ad_tone=request.ad_tone,
44
+ )
45
+ product_data = request.model_dump(exclude={"ad_type", "ad_tone"})
46
+ product_str = "\n".join(f"{k}: {v}" for k, v in product_data.items() if v is not None)
47
+
48
+
49
+ ad_content = await self.llm.generate_text(
50
+ system=system_prompt,
51
+ data_product=product_str,
52
+ max_tokens=1000,
53
+ temperature=1.0,
54
+ **kwargs
55
+ )
56
+
57
+ generation_time = time() - start
58
+
59
+ return AdGenerationResponse(
60
+ ad_content=ad_content,
61
+ product_info=ProductInfo(
62
+ product_name=request.product_name,
63
+ brand=request.brand_name,
64
+ category=request.category,
65
+ description=request.description,
66
+ price=request.price,
67
+ discounted_price=request.discounted_price,
68
+ store_link=request.product_url,
69
+ ),
70
+ ad_settings=AdSettings(
71
+ ad_type=request.ad_type,
72
+ ad_tone=request.ad_tone,
73
+ ),
74
+ generation_time=generation_time,
75
+ model_used=self.llm.model_name,
76
+ request_id=identifier
77
+ )
78
+ except Exception as e:
79
+ raise Exception(f"Ad generation failed: {str(e)}")
80
+
81
+ async def generate_streaming(self, request: AdGenerationRequest, **kwargs) -> AsyncIterator[Dict[str, Any]]:
82
+ """
83
+ Generates an advertisement with streaming response based on the provided request.
84
+ Parameters:
85
+ - request: AdGenerationRequest containing product details and ad settings.
86
+ Returns:
87
+ - AsyncIterator yielding chunks of generated ad content and metadata.
88
+ """
89
+ start = time()
90
+ identifier = generate_request_id()
91
+
92
+ try:
93
+ yield {
94
+ "status": "processing",
95
+ "message": "Generating your advertisement...",
96
+ "request_id": identifier
97
+ }
98
+
99
+ accumulate_content = ""
100
+ product_data = request.model_dump(exclude={"ad_type", "ad_tone"})
101
+ product_str = "\n".join(f"{k}: {v}" for k, v in product_data.items() if v is not None)
102
+
103
+ system_prompt = self.prompt.generate_prompt(
104
+ ad_type=request.ad_type,
105
+ ad_tone=request.ad_tone,
106
+ )
107
+
108
+ async for chunk in self.llm.generate_text_streaming(
109
+ system=system_prompt,
110
+ data_product=product_str,
111
+ max_tokens=1000,
112
+ temperature=1.0,
113
+ stream=True,
114
+ **kwargs
115
+ ):
116
+ accumulate_content += chunk
117
+ yield {
118
+ "status": "streaming",
119
+ "content": chunk,
120
+ "progress": min(len(accumulate_content) / 500 * 100, 95) # Rough progress estimate
121
+ }
122
+ # Final response after streaming is complete
123
+ yield {
124
+ "status": "completed",
125
+ "ad_content": accumulate_content,
126
+ "product_info": {
127
+ "product_name": request.product_name,
128
+ "brand": request.brand_name,
129
+ "category": request.category,
130
+ "description": request.description,
131
+ "price": request.price,
132
+ "discounted_price": request.discounted_price,
133
+ "store_link": request.product_url,
134
+ },
135
+ "ad_settings": {
136
+ "ad_type": request.ad_type,
137
+ "ad_tone": request.ad_tone,
138
+ },
139
+ "generation_time": time() - start,
140
+ "model_used": self.llm.model_name,
141
+ "request_id": identifier
142
+ }
143
+ except Exception as e:
144
+ yield {
145
+ "status": "error",
146
+ "message": str(e),
147
+ "error_code": "generation_failed",
148
+ "request_id": identifier
149
+ }
src/core/base.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from abc import ABC, abstractmethod
2
+ from typing import AsyncIterator, Dict, Any
3
+ from src.models.requests import AdGenerationRequest
4
+
5
+
6
class BaseAdGenerator(ABC):
    """Abstract base class defining the ad-generator contract."""

    @abstractmethod
    async def generate(self, request: AdGenerationRequest, **kwargs):
        """Generate advertisement"""

    @abstractmethod
    async def generate_streaming(self, request: AdGenerationRequest, **kwargs) -> AsyncIterator[Dict[str, Any]]:
        """Generate advertisement with streaming response"""
src/core/image_generator.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from PIL import Image
2
+ from io import BytesIO
3
+ from pathlib import Path
4
+ from typing import Optional
5
+ from loguru import logger
6
+
7
+ from src.llm.gemini_client import GeminiImageGeneration
8
+ from src.prompts.imagen_prompt import IMAGEN_PROMPT_TEMPLATE
9
+ from src.config import settings
10
+
11
+
12
class ImageGenerator:
    """
    Handles image generation via the Gemini image-generation API.

    Builds an image prompt from product details, asks Gemini for an image,
    and saves the result under settings.UPLOAD_DIR.
    """
    def __init__(self) -> None:
        self.imagen = GeminiImageGeneration()
        self.prompt_template = IMAGEN_PROMPT_TEMPLATE
        self.save_dir = Path(settings.UPLOAD_DIR)
        self.save_dir.mkdir(parents=True, exist_ok=True)

    def generate_image_prompt(
        self,
        product_name: str,
        brand_name: str,
        product_description: str
    ) -> Optional[str]:
        """
        Generate an image for the given product details and save it to disk.

        Args:
            product_name: Name of the product
            brand_name: Brand name of the product
            product_description: Description of the product

        Returns:
            Path of the saved image file as a string, or None when the model
            response contained no image part.

        Raises:
            RuntimeError: if the Gemini call fails or the image cannot be
            decoded/saved.
        """
        try:
            file_name = f"{product_name.replace(' ', '_')}_{brand_name.replace(' ', '_')}.png"
            file_path = self.save_dir / file_name
            prompt = self.prompt_template.format(
                product_name=product_name,
                brand_name=brand_name,
                product_description=product_description
            )

            response = self.imagen.generate_image(prompt=prompt)
            # GeminiImageGeneration.generate_image returns None on failure;
            # fail with a clear message instead of an AttributeError below.
            if response is None:
                raise RuntimeError("Gemini returned no response")

            for part in response.candidates[0].content.parts:
                if part.text is not None:
                    logger.info(f"Generated text: {part.text}")
                elif part.inline_data is not None:
                    image = Image.open(BytesIO(part.inline_data.data))
                    image.save(file_path)
                    logger.info(f"Image saved to: {file_path}")
                    return str(file_path)

            # No image part in the response: make the implicit None explicit.
            logger.warning("Gemini response contained no image data")
            return None
        except Exception as e:
            logger.error(f"Error generating image prompt: {e}")
            raise RuntimeError(f"Failed to generate image prompt: {e}")
64
+
src/llm/__init__.py ADDED
File without changes
src/llm/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (164 Bytes). View file
 
src/llm/__pycache__/base.cpython-312.pyc ADDED
Binary file (1.76 kB). View file
 
src/llm/__pycache__/gemini_client.cpython-312.pyc ADDED
Binary file (3.57 kB). View file
 
src/llm/__pycache__/openai_client.cpython-312.pyc ADDED
Binary file (5.23 kB). View file
 
src/llm/base.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from abc import ABC, abstractmethod
2
+ from typing import AsyncIterator
3
+
4
class BaseLLMClient(ABC):
    """Abstract interface that every LLM backend (text or image) implements."""

    @abstractmethod
    async def generate_text(
        self,
        prompt: str,
        max_tokens: int = 1000,
        temperature: float = 1,
        **kwargs
    ) -> str:
        """Generate text response"""

    @abstractmethod
    async def generate_text_streaming(
        self,
        prompt: str,
        max_tokens: int = 1000,
        temperature: float = 1,
        stream: bool = True,
        **kwargs
    ) -> AsyncIterator[str]:
        """Generate text with streaming response"""

    @abstractmethod
    async def health_check(self) -> bool:
        """Check if the LLM service is healthy"""

    @abstractmethod
    async def generate_image(
        self,
        prompt: str,
        max_tokens: int = 1000,
        temperature: float = 1,
        **kwargs
    ) -> str:
        """Generate image based on the prompt"""
src/llm/gemini_client.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from PIL import Image
2
+ from loguru import logger
3
+ from typing import Optional, Tuple, Union
4
+ from google import genai
5
+ from google.genai import types as genai_types
6
+
7
+ from src.llm.base import BaseLLMClient
8
+ from src.config import settings
9
+
10
+
11
+ class GeminiImageGeneration(BaseLLMClient):
12
+ """
13
+ GeminiImageGeneration is a client for interacting with the Gemini 2.0 Flash Preview Image Generation
14
+ It supports generating images based on text prompts.
15
+ It requires an API key and a model name to be initialized.
16
+ """
17
+
18
+ def __init__(self, api_key: Optional[str] = None, model_name: Optional[str] = None) -> None:
19
+
20
+ self.api_key = api_key or settings.GEMINI_API_KEY
21
+ self.model_name = model_name or settings.GEMINI_IMAGE_MODEL_NAME
22
+
23
+ if not self.api_key:
24
+ raise ValueError("Gemini API key is required")
25
+
26
+ self.client = genai.Client(api_key=self.api_key)
27
+
28
+ def generate_image(
29
+ self,
30
+ prompt: str,
31
+ ) -> Optional[Union[Image.Image, Tuple[Image.Image, str]]]:
32
+ """
33
+ Generate image using Gemini 2.0 Flash Preview Image Generation
34
+
35
+ Args:
36
+ prompt: Text prompt for image generation
37
+ save_path: Optional path to save the image
38
+
39
+ Returns:
40
+ PIL Image object or tuple of (Image, saved_path) if save_path provided
41
+ """
42
+ try:
43
+ logger.info(f"Generating image with prompt: {prompt[:100]}...")
44
+
45
+ # Generate content with Gemini
46
+ response = self.client.models.generate_content(
47
+ model=self.model_name,
48
+ contents=prompt,
49
+ config=genai_types.GenerateContentConfig(
50
+ response_modalities=['TEXT', 'IMAGE'],
51
+ temperature=1.0,
52
+ top_p=1,
53
+ top_k=32,
54
+ max_output_tokens=1024,
55
+ )
56
+ )
57
+ return response
58
+ except Exception as e:
59
+ logger.error(f"Error generating image with Gemini: {e}")
60
+ return None
61
+
62
+ def generate_text(self):
63
+ pass
64
+
65
+ def generate_text_streaming(self):
66
+ pass
67
+
68
+ def health_check(self):
69
+ return super().health_check()
src/llm/openai_client.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import openai
2
+ from openai import AsyncOpenAI
3
+ from typing import Optional, AsyncIterator
4
+ from src.config import settings
5
+ from src.llm.base import BaseLLMClient
6
+
7
+
8
+ class OpenAIClient(BaseLLMClient):
9
+ """
10
+ OpenAIClient is a client for interacting with the OpenAI API.
11
+ It supports both synchronous and asynchronous operations for generating text and streaming responses.
12
+ It requires an API key and a model name to be initialized.
13
+ """
14
+
15
+ def __init__(self, api_key: Optional[str] = None, model_name: Optional[str] = None) -> None:
16
+ """
17
+ Initialize the OpenAIClient with an API key and model name.
18
+ If no API key is provided, it will use the one from settings.
19
+ If no model name is provided, it defaults to "google/gemma-3-12b-it".
20
+ """
21
+ self.api_key = api_key or settings.LUNOS_API_KEY
22
+ self.model_name = model_name or settings.DEFAULT_MODEL_NAME
23
+
24
+ if not self.api_key:
25
+ raise ValueError("Lunos API key is required")
26
+
27
+ self.client = AsyncOpenAI(
28
+ api_key=self.api_key,
29
+ base_url=settings.LUNOS_BASE_URL
30
+ )
31
+ async def generate_text(
32
+ self,
33
+ system: str,
34
+ data_product: str,
35
+ max_tokens: int = 1000,
36
+ temperature: float = 1.0,
37
+ **kwargs
38
+ ) -> str:
39
+ """Generate text using the OpenAI API"""
40
+ try:
41
+ response = await self.client.chat.completions.create(
42
+ model=self.model_name,
43
+ messages=[
44
+ {"role": "system", "content": system},
45
+ {"role": "user", "content": data_product}
46
+ ],
47
+ max_tokens=max_tokens,
48
+ temperature=temperature,
49
+ **kwargs
50
+ )
51
+
52
+ return response.choices[0].message.content.strip()
53
+
54
+ except openai.APIError as e:
55
+ raise Exception(f"Lunor API error: {str(e)}")
56
+ except Exception as e:
57
+ raise Exception(f"Text generation failed: {str(e)}")
58
+
59
+
60
+ async def generate_text_streaming(
61
+ self,
62
+ system: str,
63
+ data_product: str,
64
+ max_tokens: int = 1000,
65
+ temperature: float = 1.0,
66
+ stream: bool = True,
67
+ **kwargs
68
+ ) -> AsyncIterator[str]:
69
+ """Generate text with streaming response"""
70
+ try:
71
+ response = await self.client.chat.completions.create(
72
+ model=self.model_name,
73
+ messages=[
74
+ {"role": "system", "content": system},
75
+ {"role": "user", "content": data_product}
76
+ ],
77
+ max_tokens=max_tokens,
78
+ temperature=temperature,
79
+ stream=stream,
80
+ **kwargs
81
+ )
82
+ async for chunk in response:
83
+ if chunk.choices[0].delta.content:
84
+ yield chunk.choices[0].delta.content
85
+ except openai.APIError as e:
86
+ raise Exception(f"Lunor API error: {str(e)}")
87
+ except Exception as e:
88
+ raise Exception(f"Text generation streaming failed: {str(e)}")
89
+
90
+ async def health_check(self) -> bool:
91
+ """Check if OpenAI API is accessible"""
92
+ try:
93
+ # Simple test request
94
+ await self.client.chat.completions.create(
95
+ model=self.model,
96
+ messages=[{"role": "user", "content": "test"}],
97
+ max_tokens=1
98
+ )
99
+ return True
100
+ except Exception:
101
+ return False
102
+
103
+ def generate_image(self):
104
+ pass
src/main.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from datetime import datetime

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from loguru import logger

from src.api.v1.ad_routers import router as ad_router
from src.api.v1.imagen_router import router as imagen_router
from src.config import settings


# Persist logs with size-based rotation and a 10-day retention window.
logger.add("logger.log", rotation="10 MB", retention="10 days", level="DEBUG")

app = FastAPI(
    title=settings.APP_NAME,
    version=settings.APP_VERSION,
    debug=settings.DEBUG,
    description=settings.APP_DESCRIPTION,
)

# Use the configured origin list instead of a hard-coded wildcard: browsers
# refuse `Access-Control-Allow-Origin: *` combined with credentials, and this
# keeps the CORS policy in one place (settings.CORS_ORIGINS, default ["*"]).
app.add_middleware(
    CORSMiddleware,
    allow_origins=settings.CORS_ORIGINS,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


@app.get("/healthcheck")
async def health_check():
    """Health check endpoint (also probed by the Docker HEALTHCHECK)."""
    return {
        "status": "healthy",
        "timestamp": datetime.now().isoformat(),
        "version": settings.APP_VERSION,
        "service": settings.APP_NAME
    }


# Register API routers
app.include_router(ad_router)
app.include_router(imagen_router)
src/models/__init__.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
"""Aggregated public exports for the request/response Pydantic models.

Fix: ImageGenerationRequest and ImageResult are defined in this package
but were missing from the exports; add them so the package's public API
matches its definitions (backward-compatible extension).
"""
from .requests import AdGenerationRequest, ImageGenerationRequest
from .response import AdGenerationResponse, ProductInfo, AdSettings, ImageResult

__all__ = [
    "AdGenerationRequest",
    "ImageGenerationRequest",
    "AdGenerationResponse",
    "ProductInfo",
    "AdSettings",
    "ImageResult",
]
src/models/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (368 Bytes). View file
 
src/models/__pycache__/ad.cpython-312.pyc ADDED
Binary file (2.12 kB). View file
 
src/models/__pycache__/requests.cpython-312.pyc ADDED
Binary file (2.41 kB). View file
 
src/models/__pycache__/response.cpython-312.pyc ADDED
Binary file (2.46 kB). View file
 
src/models/requests.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional, List
2
+ from pydantic import BaseModel, Field
3
+ from enum import Enum
4
+
5
+
6
class AdGenerationRequest(BaseModel):
    """Product input from the user-facing ad-generation form."""

    # Product information
    product_name: str = Field(..., min_length=1, max_length=200)
    # BUG FIX: fields that default to None must be annotated Optional —
    # pydantic v2 rejects an explicit `brand_name=None` for a plain `str`.
    brand_name: Optional[str] = Field(None, max_length=100)
    # min/max_length on a list constrain the item count (1..100 categories).
    category: List[str] = Field(..., min_length=1, max_length=100)
    description: Optional[str] = Field(None)
    product_url: Optional[str] = Field(None, max_length=1000)

    # Pricing — both must be strictly positive when provided.
    price: Optional[float] = Field(None, gt=0)
    discounted_price: Optional[float] = Field(None, gt=0)

    # Ad style; free-form strings — see AdType / AdTone for expected values.
    ad_type: Optional[str] = Field(None, max_length=50)
    ad_tone: Optional[str] = Field(None, max_length=50)
22
+
23
+
24
class ImageGenerationRequest(BaseModel):
    """Request model for standalone image generation."""

    product_name: str = Field(..., min_length=1, max_length=200)
    # BUG FIX: None defaults require Optional annotations — pydantic v2
    # rejects an explicit None for a plain `str` field.
    brand_name: Optional[str] = Field(None, max_length=100)
    description: Optional[str] = Field(None)
29
+
30
+
31
class AdType(str, Enum):
    """Supported advertisement formats; values appear in API payloads."""
    SOCIAL_MEDIA = "social_media"
    EMAIL = "email"
    PRODUCT_DESCRIPTION = "product_description"
35
+
36
class AdTone(str, Enum):
    """Supported advertisement tones; values appear in API payloads."""
    FRIENDLY = "friendly"
    PROFESSIONAL = "professional"
    URGENT = "urgent"
    PLAYFUL = "playful"
    LUXURIOUS = "luxurious"
    MINIMALIST = "minimalist"
    BOLD = "bold"
    CONVERSATIONAL = "conversational"
src/models/response.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pydantic import BaseModel, Field
2
+ from datetime import datetime
3
+ from typing import Optional, Literal, List
4
+ from decimal import Decimal
5
+
6
class ProductInfo(BaseModel):
    """Product information echoed back in the ad-generation response."""
    # NOTE(review): Optional fields below have no default, so under
    # pydantic v2 they are required-but-nullable — confirm that callers
    # always supply them (possibly as None).
    product_name: str
    brand: Optional[str]
    category: List[str]
    description: str
    price: Optional[Decimal]             # original price, if known
    discounted_price: Optional[Decimal]  # promotional price, if any
    store_link: str
15
+
16
class AdSettings(BaseModel):
    """Settings used for ad generation, echoed back in the response."""
    ad_type: str = Field(..., description="Type of advertisement")
    ad_tone: str = Field(..., description="Tone of the advertisement")
20
+
21
class ImageResult(BaseModel):
    """Internal model for image processing results (not exposed via API)."""
    image_path: Optional[str] = None  # Local file path (for uploaded/generated)
    image_url: Optional[str] = None   # URL for accessing the image
    source: str                       # One of: "uploaded", "url", "generated"
    generated: bool = False           # True if AI generated
27
+
28
class AdGenerationResponse(BaseModel):
    """Complete response payload for an ad-generation request."""

    # Main content
    ad_content: str = Field(description="Generated advertisement content")

    # Metadata
    product_info: ProductInfo
    ad_settings: AdSettings
    # image_info: ImageInfo

    # Generation metadata
    generation_time: float = Field(description="Total generation time in seconds")
    model_used: str = Field(description="AI model used for generation")
    request_id: str = Field(description="Unique request identifier")
    # NOTE(review): datetime.now() is timezone-naive; consider
    # datetime.now(timezone.utc) for an unambiguous timestamp.
    timestamp: datetime = Field(default_factory=datetime.now)
src/prompts/__init__.py ADDED
File without changes