Spaces:
Sleeping
Sleeping
Create models.py
Browse files
models.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# models.py
|
| 2 |
+
from pydantic import BaseModel, Field
|
| 3 |
+
from typing import Optional, List
|
| 4 |
+
|
| 5 |
+
class IdeaRequest(BaseModel):
    """Request payload for text idea generation.

    Carries the user prompt plus the generation parameters forwarded to the
    language model backend.
    """

    prompt: str = Field(..., description="The base topic or instruction for idea generation (e.g., 'Generate 5 blog post ideas about sustainable travel')")
    # ge=1: a zero or negative token budget / idea count is meaningless and
    # previously passed through to the model unchecked.
    max_length: int = Field(100, ge=1, description="Maximum number of tokens for the generated idea(s)")
    num_ideas: int = Field(1, ge=1, description="How many distinct ideas to attempt generating (model might merge them)")
|
| 9 |
+
|
| 10 |
+
class IdeaResponse(BaseModel):
    """Response payload for text idea generation.

    Returned by the idea-generation endpoint after the model has produced
    its output.
    """

    # The generated ideas, one string per idea.
    ideas: List[str]
    # Identifier of the model that produced the ideas.
    model_name: str
|
| 13 |
+
|
| 14 |
+
class ImageRequest(BaseModel):
    """Request payload for text-to-image generation.

    Mirrors the standard diffusion-pipeline parameters (dimensions, step
    count, guidance scale) plus the positive/negative prompts.
    """

    prompt: str = Field(..., description="Detailed description of the image to generate")
    negative_prompt: Optional[str] = Field(None, description="Concepts to avoid in the image")
    # gt=0: non-positive dimensions or step counts would crash the pipeline
    # with an opaque error; reject them at validation time instead.
    height: int = Field(512, gt=0, description="Image height in pixels")
    width: int = Field(512, gt=0, description="Image width in pixels")
    num_inference_steps: int = Field(30, ge=1, description="Number of diffusion steps (higher=more detail, slower)")
    # ge=0: guidance of 0 is valid (unconditional generation); negative is not.
    guidance_scale: float = Field(7.5, ge=0, description="How strongly the prompt guides generation")
|
| 21 |
+
|
| 22 |
+
class ImageResponse(BaseModel):
    """Response payload for text-to-image generation.

    The image is transported inline as base64 rather than as a binary
    attachment, so the response stays plain JSON.
    """

    image_base64: str  # Base64 encoded image data
    # Echo of the prompt that produced this image.
    prompt: str
    # Identifier of the model that produced the image.
    model_name: str
    # Encoding of the decoded image bytes; defaults to PNG.
    format: str = "PNG"
|
| 27 |
+
|
| 28 |
+
class VideoRequest(BaseModel):
    """Request payload for image-to-video generation.

    Takes a base64-encoded source image plus diffusion parameters. Several
    fields are model-specific knobs (see individual descriptions).
    """

    image_base64: str = Field(..., description="Base64 encoded input image")
    prompt: Optional[str] = Field(None, description="Optional prompt to guide video generation (if model supports)")
    # NOTE(review): motion_bucket_id is typically a Stable Video Diffusion
    # parameter — confirm the description's model reference against the backend.
    motion_bucket_id: int = Field(127, description="Controls amount of motion (model specific, e.g., Zeroscope)")
    # ge=0: noise augmentation is an additive strength; negative makes no sense.
    noise_aug_strength: float = Field(0.02, ge=0, description="Amount of noise added to input (model specific)")
    # ge=1: zero/negative frame counts, fps, or step counts would crash the
    # pipeline downstream; reject them at validation time instead.
    num_frames: int = Field(24, ge=1, description="Number of frames in the generated video")
    fps: int = Field(8, ge=1, description="Frames per second for the output video")
    num_inference_steps: int = Field(25, ge=1, description="Number of diffusion steps")
    guidance_scale: float = Field(7.0, ge=0, description="Guidance scale")
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class VideoResponse(BaseModel):
    """Response payload for image-to-video generation.

    The video is transported inline as base64 so the response stays plain
    JSON.
    """

    video_base64: str  # Base64 encoded video data
    # Identifier of the model that produced the video.
    model_name: str
    format: str = "MP4"  # Or GIF, depending on output
|