File size: 5,351 Bytes
1160ead
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
import gradio as gr
import torch
from PIL import Image
from diffusers import StableDiffusionXLPipeline, ControlNetModel, AutoencoderKL
from transformers import CLIPVisionModelWithProjection
import spaces
import os
import numpy as np

# Pre-defined head rotation angles (pitch, yaw in degrees).
# NOTE(review): only the KEYS are used in this file (dropdown choices and the
# angle_prompts lookup in generate_view); the (pitch, yaw) values are never
# read — presumably kept for a future pose-conditioning step. Confirm before
# removing.
VIEW_ANGLES = {
    "front": (0, 0),
    "3/4-left": (0, -35),
    "3/4-right": (0, 35),
    "profile-left": (0, -90),
    "profile-right": (0, 90),
}

# Global pipeline (loaded once); populated lazily by load_pipeline().
pipe = None
# NOTE(review): ip_adapter is declared but never assigned or read in this
# file — likely dead, but kept in case external code imports it.
ip_adapter = None

def load_pipeline():
    """Load (once) and cache the SDXL + IP-Adapter FaceID Plus pipeline.

    Lazily initializes the module-level ``pipe`` global on first call and
    returns the cached instance on subsequent calls.

    Returns:
        StableDiffusionXLPipeline: pipeline with the FaceID Plus v2
        IP-Adapter loaded and model CPU offload enabled.
    """
    global pipe

    # Reuse the already-initialized pipeline on repeat calls.
    if pipe is not None:
        return pipe

    # Base model - using a good anime-capable SDXL model.
    # (The redundant in-function re-import of StableDiffusionXLPipeline and
    # the unused load_image import were removed; both names are covered by
    # the top-of-file imports.)
    model_id = "stabilityai/stable-diffusion-xl-base-1.0"

    pipe = StableDiffusionXLPipeline.from_pretrained(
        model_id,
        torch_dtype=torch.float16,
        variant="fp16",
    )

    # Load IP-Adapter FaceID Plus v2 weights and its CLIP image encoder
    # from the h94/IP-Adapter-FaceID repository.
    pipe.load_ip_adapter(
        "h94/IP-Adapter-FaceID",
        subfolder=None,
        weight_name="ip-adapter-faceid-plusv2_sdxl.bin",
        image_encoder_folder="models/image_encoder",
    )

    # Default identity strength for the adapter.
    pipe.set_ip_adapter_scale(0.7)
    # Offload idle submodules to CPU to reduce peak VRAM usage.
    pipe.enable_model_cpu_offload()

    return pipe


@spaces.GPU
def generate_view(
    face_image: Image.Image,
    view_angle: str,
    prompt: str,
    negative_prompt: str = "",
    strength: float = 0.8,
    guidance_scale: float = 5.0,
    num_steps: int = 30,
    seed: int = 42,
) -> Image.Image:
    """
    Generate a character view from a reference face.

    Args:
        face_image: Reference face image (front view ideally)
        view_angle: One of 'front', '3/4-left', '3/4-right', 'profile-left', 'profile-right'
        prompt: Description of the character/style
        negative_prompt: What to avoid (prepended to the built-in defaults)
        strength: IP-Adapter strength (0.0-1.0)
        guidance_scale: CFG scale
        num_steps: Number of inference steps
        seed: Random seed for reproducibility

    Returns:
        Generated 1024x1024 image at the specified angle
    """

    pipe = load_pipeline()

    # BUG FIX: `strength` was accepted but never applied, so the
    # "Identity Strength" slider had no effect (the scale stayed at the
    # 0.7 default set at load time). Apply it per request.
    pipe.set_ip_adapter_scale(strength)

    # View-specific phrases appended to the prompt to steer head pose.
    angle_prompts = {
        "front": "front view, facing camera, symmetrical",
        "3/4-left": "3/4 view, turned slightly left, three quarter view",
        "3/4-right": "3/4 view, turned slightly right, three quarter view",
        "profile-left": "side profile, facing left, profile view",
        "profile-right": "side profile, facing right, profile view",
    }

    # Unknown angles fall back to an empty pose hint rather than raising.
    view_prompt = angle_prompts.get(view_angle, "")
    full_prompt = f"{prompt}, {view_prompt}, portrait, character art, consistent character"

    # Always include the quality-guard negatives; user extras go first.
    default_negative = "deformed, ugly, bad anatomy, blurry, low quality, worst quality, text, watermark"
    full_negative = f"{negative_prompt}, {default_negative}" if negative_prompt else default_negative

    # CPU generator keeps the seed reproducible regardless of offload device.
    generator = torch.Generator(device="cpu").manual_seed(seed)

    # Generate with the IP-Adapter conditioned on the reference face.
    result = pipe(
        prompt=full_prompt,
        negative_prompt=full_negative,
        ip_adapter_image=face_image,
        num_inference_steps=num_steps,
        guidance_scale=guidance_scale,
        generator=generator,
        width=1024,
        height=1024,
    )

    return result.images[0]


# Gradio Interface: left column holds all inputs, right column the output;
# widget creation order inside each container defines the on-screen layout.
with gr.Blocks(title="Character View Generator") as demo:
    gr.Markdown("""
    # Character View Generator

    Generate consistent character views from a reference face image.
    Designed for character sheet pipelines.
    """)

    with gr.Row():
        with gr.Column():
            # Reference face upload, delivered to generate_view as a PIL image.
            face_input = gr.Image(label="Reference Face", type="pil")
            # Choices are the VIEW_ANGLES keys; the selected key is passed
            # straight through as generate_view's view_angle argument.
            view_dropdown = gr.Dropdown(
                choices=list(VIEW_ANGLES.keys()),
                value="front",
                label="Target View Angle"
            )
            prompt_input = gr.Textbox(
                label="Prompt",
                placeholder="anime character, blue skin, orange eyes...",
                lines=2
            )
            negative_input = gr.Textbox(
                label="Negative Prompt (optional)",
                placeholder="realistic, photo...",
                lines=2
            )

            # Slider defaults mirror generate_view's parameter defaults.
            with gr.Row():
                strength_slider = gr.Slider(0.0, 1.0, value=0.8, label="Identity Strength")
                guidance_slider = gr.Slider(1.0, 20.0, value=5.0, label="Guidance Scale")

            with gr.Row():
                steps_slider = gr.Slider(10, 50, value=30, step=1, label="Steps")
                # precision=0 forces an integer seed.
                seed_input = gr.Number(value=42, label="Seed", precision=0)

            generate_btn = gr.Button("Generate View", variant="primary")

        with gr.Column():
            output_image = gr.Image(label="Generated View")

    # Inputs are listed in generate_view's positional parameter order.
    generate_btn.click(
        fn=generate_view,
        inputs=[
            face_input,
            view_dropdown,
            prompt_input,
            negative_input,
            strength_slider,
            guidance_slider,
            steps_slider,
            seed_input,
        ],
        outputs=output_image,
        api_name="generate_view"  # Enables API access
    )


if __name__ == "__main__":
    # api_open=True keeps the queue's API endpoints (e.g. /generate_view)
    # accessible to external clients.
    demo.queue(api_open=True)
    demo.launch()