File size: 2,178 Bytes
dc2099d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
# models.py
from pydantic import BaseModel, Field
from typing import Optional, List

class IdeaRequest(BaseModel):
    """Request payload for text idea generation."""

    # Base topic or instruction driving the generation.
    prompt: str = Field(..., description="The base topic or instruction for idea generation (e.g., 'Generate 5 blog post ideas about sustainable travel')")
    # Token budget for the generated text.
    max_length: int = Field(default=100, description="Maximum number of tokens for the generated idea(s)")
    # Requested number of ideas; the model may merge them into one completion.
    num_ideas: int = Field(default=1, description="How many distinct ideas to attempt generating (model might merge them)")

class IdeaResponse(BaseModel):
    """Response payload for text idea generation."""

    # Generated ideas, one string per idea.
    ideas: List[str]
    # Identifier of the model that produced the ideas.
    model_name: str

class ImageRequest(BaseModel):
    """Request payload for image generation."""

    # Text description of the desired image.
    prompt: str = Field(..., description="Detailed description of the image to generate")
    # Optional negative prompt; None means no exclusions.
    negative_prompt: Optional[str] = Field(default=None, description="Concepts to avoid in the image")
    # Output dimensions in pixels.
    height: int = Field(default=512, description="Image height in pixels")
    width: int = Field(default=512, description="Image width in pixels")
    # Diffusion sampling parameters.
    num_inference_steps: int = Field(default=30, description="Number of diffusion steps (higher=more detail, slower)")
    guidance_scale: float = Field(default=7.5, description="How strongly the prompt guides generation")

class ImageResponse(BaseModel):
    """Response payload for image generation."""

    image_base64: str  # Base64 encoded image data
    # Prompt that was used to generate the image (echoed back).
    prompt: str
    # Identifier of the model that produced the image.
    model_name: str
    # Encoding of the image bytes; defaults to PNG.
    format: str = "PNG"

class VideoRequest(BaseModel):
    """Request payload for image-to-video generation."""

    # Source frame the video is generated from.
    image_base64: str = Field(..., description="Base64 encoded input image")
    # Optional text guidance; None when the model is image-only.
    prompt: Optional[str] = Field(default=None, description="Optional prompt to guide video generation (if model supports)")
    # Model-specific motion controls.
    motion_bucket_id: int = Field(default=127, description="Controls amount of motion (model specific, e.g., Zeroscope)")
    noise_aug_strength: float = Field(default=0.02, description="Amount of noise added to input (model specific)")
    # Output video parameters.
    num_frames: int = Field(default=24, description="Number of frames in the generated video")
    fps: int = Field(default=8, description="Frames per second for the output video")
    # Diffusion sampling parameters.
    num_inference_steps: int = Field(default=25, description="Number of diffusion steps")
    guidance_scale: float = Field(default=7.0, description="Guidance scale")


class VideoResponse(BaseModel):
    """Response payload for video generation."""

    video_base64: str  # Base64 encoded video data
    # Identifier of the model that produced the video.
    model_name: str
    format: str = "MP4"  # Or GIF, depending on output