File size: 1,630 Bytes
6026000
 
 
 
 
 
 
 
 
0f36a29
6026000
0f0e9c7
8a95db3
0f36a29
6026000
 
0f36a29
6026000
0f36a29
 
6026000
 
 
 
 
0f0e9c7
 
 
 
 
6026000
 
 
 
 
0f0e9c7
6026000
 
 
 
0f0e9c7
 
6026000
 
0f36a29
a62eb9c
6026000
 
0f0e9c7
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
import torch
from config import Config

class Generator:
    """Thin wrapper around a diffusers-style text-to-image pipeline that
    applies the project's prompt, aspect-ratio, and seed conventions."""

    def __init__(self, model_handler):
        # model_handler is expected to expose a `.pipeline` attribute
        # (a callable diffusers-style text-to-image pipeline).
        self.mh = model_handler

    def predict(
        self,
        user_prompt,
        negative_prompt="",
        aspect_ratio="1:1",
        guidance_scale=1.6,
        num_inference_steps=8,
        seed=-1
    ):
        """Generate a single image from a text prompt.

        Args:
            user_prompt: Free-form prompt text. The style trigger from
                Config is always prepended. None, empty, or whitespace-only
                input falls back to a default landscape prompt.
            negative_prompt: Passed through to the pipeline unchanged.
            aspect_ratio: Key into Config.ASPECT_RATIOS; unknown keys fall
                back to Config.DEFAULT_ASPECT_RATIO.
            guidance_scale: Classifier-free guidance scale for the pipeline.
            num_inference_steps: Number of denoising steps.
            seed: RNG seed; -1 or None requests a fresh random seed.

        Returns:
            The first image from the pipeline's `.images` output.
        """
        # 1. Construct Prompt. Guard against None as well as empty or
        # whitespace-only input (calling .strip() on None would raise
        # AttributeError in the original).
        if not user_prompt or not user_prompt.strip():
            # Fallback if user provides no usable prompt
            final_prompt = f"{Config.STYLE_TRIGGER}, a beautiful landscape, pixel art"
        else:
            final_prompt = f"{Config.STYLE_TRIGGER}, {user_prompt}"

        print(f"Prompt: {final_prompt}")

        # 2. Get dimensions from aspect ratio, falling back to the
        # configured default for unrecognized keys.
        width, height = Config.ASPECT_RATIOS.get(
            aspect_ratio, Config.ASPECT_RATIOS[Config.DEFAULT_ASPECT_RATIO]
        )
        print(f"Aspect Ratio: {aspect_ratio} ({width}x{height})")

        # 3. Handle Seed. torch.Generator().seed() draws a fresh random
        # seed; the device generator is then seeded deterministically from
        # it so the logged value can reproduce the run.
        if seed is None or seed == -1:
            seed = torch.Generator().seed()
        generator = torch.Generator(device=Config.DEVICE).manual_seed(int(seed))
        print(f"Using seed: {seed}")

        # 4. Run Text-to-Image Inference and return the first image.
        print("Running pipeline...")
        result = self.mh.pipeline(
            prompt=final_prompt,
            negative_prompt=negative_prompt,
            width=width,
            height=height,
            generator=generator,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            clip_skip=1,  # Optional, often helps with anime/pixel styles
        ).images[0]

        return result