File size: 24,933 Bytes
bce4c09
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
"""
Agent Brown Workflow - Streamlined for hackathon demo
"""

import os
import json
import asyncio
import time
from typing import Dict, List, Optional, Any, Sequence
from datetime import datetime
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from agents.brown import AgentBrown, StoryboardRequest
from agents.brown_tools import create_brown_tools
from agents.bayko_workflow import create_agent_bayko
from llama_index.core.agent import ReActAgent
from llama_index.core.memory import ChatMemoryBuffer
from llama_index.core.tools import FunctionTool, BaseTool
from llama_index.core.llms import (
    ChatMessage,
    ImageBlock,
    TextBlock,
    MessageRole,
)
from llama_index.core.workflow import (
    Event,
    StartEvent,
    StopEvent,
    Workflow,
    step,
)
from pydantic import Field

from llama_index.core.llms.llm import ToolSelection
from llama_index.core.tools.types import ToolOutput
from llama_index.core.workflow import Context
from prompts.brown_workflow_system_prompt import BROWN_WORKFLOW_SYSTEM_PROMPT


# Global LLM throttle
# Minimum seconds between successive LLM calls; tuned to stay under strict
# provider rate limits (roughly 3 requests/minute).
LLM_MIN_INTERVAL_SECONDS = 21.0

_llm_lock = asyncio.Lock()
_last_llm_time = 0.0


async def throttle_llm(min_interval: float = LLM_MIN_INTERVAL_SECONDS) -> None:
    """Sleep just long enough to keep LLM calls ``min_interval`` seconds apart.

    All callers serialize through a global lock, so concurrent tasks are
    throttled as a group rather than each keeping its own timer.

    Args:
        min_interval: Minimum number of seconds between consecutive calls.
            Defaults to the module-level ``LLM_MIN_INTERVAL_SECONDS``.
    """
    global _last_llm_time
    async with _llm_lock:
        now = time.time()
        # How much of the interval is still outstanding since the last call.
        wait = max(0.0, min_interval - (now - _last_llm_time))
        if wait > 0:
            await asyncio.sleep(wait)
        # Record completion time so the next caller measures from here.
        _last_llm_time = time.time()


# Workflow Events
class InputEvent(Event):
    """Carries the accumulated chat history into the LLM-input step."""

    def __init__(self, input: list) -> None:
        super().__init__()
        # Full message list (system/user/tool messages) handed to the LLM.
        self.input = input


class StreamEvent(Event):
    """Emits one incremental text chunk of a streamed LLM response."""

    def __init__(self, delta: str) -> None:
        super().__init__()
        # The newly received fragment of the streamed response.
        self.delta = delta


class ToolCallEvent(Event):
    """Signals that the LLM requested one or more tool invocations."""

    def __init__(self, tool_calls: list) -> None:
        super().__init__()
        # Tool-call selections extracted from the LLM response.
        self.tool_calls = tool_calls


class FunctionOutputEvent(Event):
    """Wraps the raw output produced by a single tool/function call."""

    def __init__(self, output) -> None:
        super().__init__()
        # Result object returned by the invoked tool.
        self.output = output


# Custom Events for Comic Generation Workflow
class ComicGeneratedEvent(Event):
    """Event triggered when Bayko completes comic generation"""

    def __init__(self, bayko_response: dict, enhanced_prompt: str) -> None:
        super().__init__()
        # Prompt text Brown produced before handing off to Bayko.
        self.enhanced_prompt = enhanced_prompt
        # Raw payload returned by Bayko's generation pipeline.
        self.bayko_response = bayko_response


class CritiqueStartEvent(Event):
    """Event to start Brown's critique/judging of Bayko's work"""

    def __init__(self, comic_data: dict, original_prompt: str) -> None:
        super().__init__()
        # The user's unmodified request, used as the judging baseline.
        self.original_prompt = original_prompt
        # Generated comic payload under review.
        self.comic_data = comic_data


class WorkflowPauseEvent(Event):
    """Event to pause workflow for a specified duration"""

    def __init__(
        self, duration_seconds: int = 600, message: str = "Workflow paused"
    ) -> None:
        super().__init__()
        # Human-readable reason surfaced while the workflow is paused.
        self.message = message
        # How long the workflow should stay paused, in seconds.
        self.duration_seconds = duration_seconds


class BrownFunctionCallingAgent(Workflow):
    """
    Agent Brown Function Calling Workflow using LlamaIndex Workflow pattern

    BROWN'S RESPONSIBILITIES:
    - Validate user input
    - Process and enhance requests
    - Coordinate with Bayko (pass messages)
    - Review Bayko's output using multimodal analysis
    - Make approval decisions (APPROVE/REFINE/REJECT)
    - Manage iteration loop (max 2 refinements)

    NOTE(review): two @step methods (`prepare_chat_history` and
    `enhance_and_send_to_bayko`) both accept StartEvent, so the framework
    will trigger both when the workflow starts — confirm this is intended.
    """

    def __init__(
        self,
        *args: Any,
        llm: OpenAIMultiModal | None = None,
        tools: List[BaseTool] | None = None,
        max_iterations: int = 1,  # Force only one iteration
        openai_api_key: Optional[str] = None,
        timeout: Optional[int] = None,
        **kwargs: Any,
    ) -> None:
        """Build the workflow: multimodal LLM, Brown's tool set, and a Bayko
        sub-workflow for content generation.

        Note: `max_iterations` is accepted for interface compatibility but is
        ignored — the attribute is hard-coded to 1 below.
        """
        super().__init__(*args, timeout=timeout, **kwargs)

        self.max_iterations = 1  # Force only one iteration

        # Initialize multimodal LLM for Brown (GPT-4V for image analysis)
        # "tool_choice": "required" forces the model to emit a tool call.
        self.llm = llm or OpenAIMultiModal(
            model="gpt-4o",
            api_key=openai_api_key or os.getenv("OPENAI_API_KEY"),
            temperature=0.7,
            max_tokens=2048,
            additional_kwargs={"tool_choice": "required"},
        )

        # Ensure it's a function calling model
        # NOTE(review): assert is stripped under `python -O`; raise instead if
        # this check must always run.
        assert self.llm.metadata.is_function_calling_model

        # Create ONLY Brown's tools (validation, processing, review)
        self.tools = tools or self._create_brown_tools()

        # Initialize Bayko workflow for content generation
        self.bayko_workflow = create_agent_bayko(openai_api_key=openai_api_key)

    def _create_brown_tools(self) -> List[FunctionTool]:
        """Create ONLY Brown's tools - validation, processing, review, coordination

        Returns Brown's core tools (from ``create_brown_tools``) plus two
        closures defined here: async ``coordinate_with_bayko`` (hands the
        request to the Bayko sub-workflow) and sync ``analyze_bayko_output``
        (multimodal review of Bayko's panels).
        """

        # Get Brown's core tools (validation, processing, review)
        brown_tools_instance = create_brown_tools(self.max_iterations)
        brown_tools = brown_tools_instance.create_llamaindex_tools()

        # Add coordination tool to communicate with Bayko
        async def coordinate_with_bayko_tool(
            enhanced_prompt: str,
            style_tags: str = "[]",
            panels: int = 4,
            language: str = "english",
            extras: str = "[]",
        ) -> str:
            """
            Send the enhanced comic request to Agent Bayko for actual comic content generation.
            This is the ONLY way to generate comic panels and story content.
            Always use this tool when the user prompt or workflow requires new comic content.

            Arguments:
                enhanced_prompt: The improved user prompt for the comic.
                style_tags: JSON list of style tags (e.g., '["manga", "noir"]').
                panels: Number of comic panels to generate.
                language: Language for the comic.
                extras: JSON list of extra instructions.

            Returns:
                JSON string with Bayko's response and status. Example:
                '{"status": "bayko_generation_complete", "bayko_response": {...}, ...}'
            """
            try:
                # Parse inputs
                # style_tags/extras arrive as JSON strings from the LLM.
                style_list = json.loads(style_tags) if style_tags else []
                extras_list = json.loads(extras) if extras else []

                # Create message for Bayko
                bayko_request = {
                    "enhanced_prompt": enhanced_prompt,
                    "style_tags": style_list,
                    "panels": panels,
                    "language": language,
                    "extras": extras_list,
                    "session_id": "brown_coordination",
                }

                # Call Bayko workflow to generate content
                # If Bayko is async, use: bayko_result = await self.bayko_workflow.process_generation_request(bayko_request)
                # NOTE(review): called synchronously inside an async tool —
                # confirm process_generation_request is not a coroutine.
                bayko_result = self.bayko_workflow.process_generation_request(
                    bayko_request
                )

                return json.dumps(
                    {
                        "status": "bayko_generation_complete",
                        "bayko_response": bayko_result,
                        "panels_generated": panels,
                        "coordination_successful": True,
                    }
                )

            except Exception as e:
                # Failures are reported back to the LLM as JSON, not raised.
                print(f"[Brown] Bayko coordination failed: {e}")  # Debug log
                return json.dumps(
                    {
                        "status": "bayko_coordination_failed",
                        "error": str(e),
                        "coordination_successful": False,
                    }
                )

        # Add multimodal image analysis tool for Brown to judge Bayko's work
        def analyze_bayko_output_tool(
            bayko_response: str, original_prompt: str = ""
        ) -> str:
            """Analyze Bayko's generated content using Brown's multimodal capabilities.

            Parses Bayko's JSON response, attaches up to four panel images,
            asks the multimodal LLM for a critique, and maps the reply text
            onto an APPROVE/REFINE/REJECT decision. Returns a JSON string.
            """
            try:
                # Parse Bayko's response
                bayko_data = json.loads(bayko_response)

                # Extract image URLs/paths for analysis
                image_urls = []
                panels = bayko_data.get("panels", [])

                for panel in panels:
                    if "image_url" in panel:
                        image_urls.append(panel["image_url"])
                    elif "image_path" in panel:
                        # Convert local path to file URL for analysis
                        image_urls.append(f"file://{panel['image_path']}")

                if not image_urls:
                    # No images at all is an automatic rejection.
                    return json.dumps(
                        {
                            "analysis": "No images found in Bayko's output",
                            "decision": "REJECT",
                            "reason": "Missing visual content",
                        }
                    )

                # Create multimodal analysis prompt
                analysis_prompt = f"""Analyze this comic content generated by Agent Bayko.

Original User Prompt: "{original_prompt}"

Bayko's Generated Content: {json.dumps(bayko_data, indent=2)}

As Agent Brown, evaluate:
1. Visual Quality: Are the images well-composed and clear?
2. Style Consistency: Does it match the requested style?
3. Story Coherence: Do the panels tell a logical story?
4. Prompt Adherence: Does it fulfill the user's request?
5. Technical Quality: Are all assets properly generated?

Make a decision: APPROVE (ready for user), REFINE (needs improvement), or REJECT (start over)
Provide specific feedback for any issues."""

                # Create multimodal message with text and images
                from typing import Union

                blocks: List[Union[TextBlock, ImageBlock]] = [
                    TextBlock(text=analysis_prompt)
                ]

                # Add image blocks for visual analysis (limit to 4 for API constraints)
                for url in image_urls[:4]:
                    blocks.append(ImageBlock(url=url))

                msg = ChatMessage(role=MessageRole.USER, blocks=blocks)

                # Get Brown's multimodal analysis
                response = self.llm.chat(messages=[msg])
                response_text = str(response).lower()

                # Parse Brown's decision
                # Keyword scan of the free-text reply; REJECT wins over
                # APPROVE when both words appear, otherwise default REFINE.
                if (
                    "approve" in response_text
                    and "reject" not in response_text
                ):
                    decision = "APPROVE"
                elif "reject" in response_text:
                    decision = "REJECT"
                else:
                    decision = "REFINE"

                return json.dumps(
                    {
                        "analysis": str(response),
                        "decision": decision,
                        "images_analyzed": len(image_urls),
                        "multimodal_analysis": True,
                        "brown_judgment": True,
                    }
                )

            except Exception as e:
                # Deliberate fail-open: any analysis error approves the comic
                # so the demo pipeline keeps moving.
                return json.dumps(
                    {
                        "analysis": f"Analysis failed: {str(e)}",
                        "decision": "APPROVE",  # Default to approval on error
                        "error": str(e),
                    }
                )

        # Add Brown's coordination and analysis tools
        coordination_tools = [
            FunctionTool.from_defaults(
                async_fn=coordinate_with_bayko_tool,
                name="coordinate_with_bayko",
                description="Send the enhanced comic request to Agent Bayko for actual comic content generation. This is the ONLY way to generate comic panels and story content. Always use this tool when the user prompt or workflow requires new comic content. Returns a JSON string with Bayko's response and status.",
            ),
            FunctionTool.from_defaults(
                fn=analyze_bayko_output_tool,
                name="analyze_bayko_output",
                description="Analyze Bayko's generated content using Brown's multimodal capabilities. Make approval decision.",
            ),
        ]

        # Combine Brown's core tools with coordination tools
        all_brown_tools = brown_tools + coordination_tools
        return all_brown_tools

    @step
    async def prepare_chat_history(
        self, ctx: Context, ev: StartEvent
    ) -> InputEvent:
        """Prepare chat history and handle user input

        Seeds workflow context (sources, iteration counter, original prompt),
        lazily creates the chat memory with Brown's system prompt, records
        the user message, and emits the full history as an InputEvent.
        """
        # Clear sources and initialize iteration counter
        await ctx.set("sources", [])
        await ctx.set("iteration_count", 0)

        # Store original user prompt in context
        await ctx.set("original_prompt", ev.input)

        # Check if memory is setup
        memory = await ctx.get("memory", default=None)
        if not memory:
            memory = ChatMemoryBuffer.from_defaults(llm=self.llm)

        # Add system message if not present
        if not memory.get() or memory.get()[0].role != "system":
            system_msg = ChatMessage(
                role="system", content=BROWN_WORKFLOW_SYSTEM_PROMPT
            )
            memory.put(system_msg)

        # Get user input and add to memory
        user_msg = ChatMessage(role="user", content=ev.input)
        memory.put(user_msg)

        # Update context with memory
        await ctx.set("memory", memory)

        # Return chat history
        return InputEvent(input=memory.get())

    @step
    async def enhance_and_send_to_bayko(
        self, ctx: Context, ev: StartEvent
    ) -> InputEvent:
        """
        SINGLE STEP: Enhance prompt with ONE LLM call and send to Bayko.
        NO TOOL CALLING LOOP - DIRECT PROCESSING ONLY.

        Emits an InputEvent on both success and failure so the workflow stays
        alive for the next prompt instead of stopping.
        """
        original_prompt = ev.input
        print(f"Single-step processing: {original_prompt[:50]}...")

        # Create simple enhancement prompt (NO TOOL CALLING)
        enhancement_prompt = f"""Enhance this comic story prompt for visual storytelling:

Original: {original_prompt}

Provide ONLY an enhanced story description with visual details, mood, and style suggestions.
Keep it concise and focused on the core narrative.
DO NOT use any tools or functions - just return the enhanced prompt text."""

        try:
            # SINGLE LLM CALL - No tools, no streaming, no complexity
            print("πŸš€ Making SINGLE OpenAI call...")

            # Use simple chat completion without tools
            # Separate client: no forced tool_choice, smaller token budget.
            simple_llm = OpenAIMultiModal(
                model="gpt-4o",
                api_key=self.llm.api_key,
                temperature=0.7,
                max_tokens=500,  # Shorter response
                # NO tool_choice - allows free response
            )

            response = await simple_llm.achat(
                [ChatMessage(role="user", content=enhancement_prompt)]
            )

            # Fall back to the raw enhancement prompt if the reply is empty.
            enhanced_prompt = response.message.content or enhancement_prompt
            print(f"πŸš€ Enhanced: {enhanced_prompt[:100]}...")

            # Send directly to Bayko
            bayko_request = {
                "prompt": enhanced_prompt,
                "original_prompt": original_prompt,
                "style_tags": ["comic", "storytelling"],
                "panels": 4,
                "language": "english",
                "extras": [],
                "session_id": "hackathon_session",
            }

            print("πŸš€ Calling Bayko and waiting for image generation...")
            # PROPERLY AWAIT Bayko's async image generation
            # NOTE(review): no await here despite the comment above — confirm
            # process_generation_request is synchronous.
            bayko_result = self.bayko_workflow.process_generation_request(
                bayko_request
            )

            print("πŸš€ SUCCESS! Comic generated and images ready!")
            print("πŸ“ Check your storyboard folder for images and logs!")
            print("πŸŽ‰ Ready for next prompt!")

            # DON'T STOP - Let user enter new prompt
            return InputEvent(
                input=[
                    ChatMessage(
                        role="assistant",
                        content=f"βœ… Comic generated successfully! Enhanced prompt: {enhanced_prompt[:100]}... Images saved to storyboard folder. Ready for your next comic idea!",
                    )
                ]
            )

        except Exception as e:
            print(f"🚨Error: {e}")
            # DON'T STOP on error either - let user try again
            return InputEvent(
                input=[
                    ChatMessage(
                        role="assistant",
                        content=f"❌ Error generating comic: {str(e)}. Please try again with a different prompt.",
                    )
                ]
            )

    @step
    async def handle_llm_input(
        self, ctx: Context, ev: InputEvent
    ) -> ToolCallEvent | StopEvent:
        """Handle LLM input and determine if tools need to be called

        Streams a tool-enabled chat completion, persists the final message to
        memory, and forwards any tool calls as a ToolCallEvent.
        """
        import asyncio

        chat_history = ev.input

        # Add system prompt for Agent Brown
        system_prompt = BROWN_WORKFLOW_SYSTEM_PROMPT

        # Add system message if not present
        if not chat_history or chat_history[0].role != "system":
            system_msg = ChatMessage(role="system", content=system_prompt)
            chat_history = [system_msg] + chat_history

        await throttle_llm()
        # Stream the response
        response_stream = await self.llm.astream_chat_with_tools(
            self.tools, chat_history=chat_history
        )
        async for response in response_stream:
            ctx.write_event_to_stream(StreamEvent(delta=response.delta or ""))

        # Save the final response, which should have all content
        # NOTE(review): `response` is the last item yielded by the loop above;
        # if the stream yields nothing this raises NameError — confirm the
        # stream is guaranteed non-empty.
        memory = await ctx.get("memory")
        memory.put(response.message)
        await ctx.set("memory", memory)

        # Get tool calls
        # error_on_no_tool_call=True makes this call raise when no tool was
        # used, so the `if not tool_calls` branch below is effectively dead.
        tool_calls = self.llm.get_tool_calls_from_response(
            response, error_on_no_tool_call=True
        )

        if not tool_calls:
            # If no tool call, error out immediately
            raise RuntimeError(
                "Agent Brown did not use the required tool. The workflow will stop."
            )
        else:
            return ToolCallEvent(tool_calls=tool_calls)

    @step
    async def handle_tool_calls(
        self, ctx: Context, ev: ToolCallEvent
    ) -> InputEvent:
        """Handle tool calls with proper error handling (supports async tools)

        Executes each requested tool, records outputs in context "sources",
        appends tool messages to memory, and loops back via InputEvent.
        """
        import inspect
        import asyncio

        tool_calls = ev.tool_calls
        tools_by_name = {tool.metadata.get_name(): tool for tool in self.tools}

        tool_msgs = []
        sources = await ctx.get("sources", default=[])

        # Call tools -- safely!
        for tool_call in tool_calls:
            tool = tools_by_name.get(tool_call.tool_name)
            if not tool:
                # Unknown tool name: report back to the LLM instead of raising.
                additional_kwargs = {
                    "tool_call_id": tool_call.tool_id,
                    "name": tool_call.tool_name,
                }
                tool_msgs.append(
                    ChatMessage(
                        role="tool",
                        content=f"Tool {tool_call.tool_name} does not exist",
                        additional_kwargs=additional_kwargs,
                    )
                )
                continue

            additional_kwargs = {
                "tool_call_id": tool_call.tool_id,
                "name": tool.metadata.get_name(),
            }

            try:
                # Check if tool is async and call appropriately
                # NOTE(review): iscoroutinefunction is applied to the tool
                # *object*, not its wrapped function — verify async tools
                # actually take the await branch here.
                if inspect.iscoroutinefunction(tool):
                    tool_output = await tool(**tool_call.tool_kwargs)
                else:
                    tool_output = tool(**tool_call.tool_kwargs)

                sources.append(tool_output)
                tool_msgs.append(
                    ChatMessage(
                        role="tool",
                        content=(
                            tool_output.content
                            if hasattr(tool_output, "content")
                            else str(tool_output)
                        ),
                        additional_kwargs=additional_kwargs,
                    )
                )
                # Throttle after each tool call
                # Fixed 21s pause mirrors the global LLM throttle interval.
                await asyncio.sleep(21)
            except Exception as e:
                # Surface the failure to the LLM as a tool message.
                tool_msgs.append(
                    ChatMessage(
                        role="tool",
                        content=f"Encountered error in tool call: {e}",
                        additional_kwargs=additional_kwargs,
                    )
                )

        # Update memory
        memory = await ctx.get("memory")
        for msg in tool_msgs:
            memory.put(msg)

        await ctx.set("sources", sources)
        await ctx.set("memory", memory)

        chat_history = memory.get()
        return InputEvent(input=chat_history)


class BrownWorkflow:
    """
    Wrapper class for backward compatibility with existing code

    Thin facade over BrownFunctionCallingAgent: owns Brown's tool set and a
    multimodal LLM client, and exposes sync/async entry points that run the
    workflow for a single user prompt.
    """

    def __init__(
        self, max_iterations: int = 1, openai_api_key: Optional[str] = None
    ):
        # Iteration count is pinned to 1 regardless of the requested value.
        self.max_iterations = 1
        self.openai_api_key = openai_api_key

        # Brown's validation/processing/review tools.
        self.tools: Sequence[BaseTool] = create_brown_tools(
            max_iterations
        ).create_llamaindex_tools()

        # Multimodal GPT-4o client shared across runs.
        self.llm = self._build_llm()

    def _build_llm(self) -> OpenAIMultiModal:
        """Construct the GPT-4o multimodal client with tool calling enforced."""
        return OpenAIMultiModal(
            model="gpt-4o",
            api_key=self.openai_api_key or os.getenv("OPENAI_API_KEY"),
            temperature=0.7,
            max_tokens=2048,
            additional_kwargs={"tool_choice": "required"},
        )

    def reset(self):
        """Reset workflow state for a new session."""
        self.session_id = None
        # Rebuild agent tool instances so no state leaks between sessions.
        self.tools = create_brown_tools(
            self.max_iterations
        ).create_llamaindex_tools()
        # Fresh LLM client as well.
        self.llm = self._build_llm()

    async def process_comic_request_async(
        self, user_prompt: str
    ) -> Dict[str, Any]:
        """Async version of comic request processing

        Runs a BrownFunctionCallingAgent on the prompt and distills the tool
        outputs into a status dict. Never raises: all failures come back as
        ``{"status": "error", "error": ...}``.
        """
        try:
            # Fresh workflow agent per request.
            workflow = BrownFunctionCallingAgent(
                llm=self.llm,
                tools=list(self.tools),  # Convert Sequence to List
                max_iterations=self.max_iterations,
                openai_api_key=self.openai_api_key,
                timeout=None,
                verbose=True,
            )

            # Run workflow with user prompt as input (LlamaIndex pattern)
            result = await workflow.run(input=user_prompt)

            # The workflow must have produced some response.
            if not result.get("response"):
                return {
                    "status": "error",
                    "error": "No response from workflow",
                }

            # Keep only tool outputs that carry Bayko content.
            tool_outputs = result.get("sources", [])
            bayko_outputs = [
                src
                for src in tool_outputs
                if "bayko_response" in str(src.content)
            ]

            if not bayko_outputs:
                return {
                    "status": "error",
                    "error": "No content generated by Bayko",
                }

            # The last Bayko output is the final version.
            try:
                final_output = json.loads(bayko_outputs[-1].content)
            except json.JSONDecodeError as e:
                return {
                    "status": "error",
                    "error": f"Failed to parse Bayko response: {str(e)}",
                }

            return {
                "status": "success",
                "bayko_response": final_output.get("bayko_response", {}),
                "decision": final_output.get("decision", "APPROVE"),
                "analysis": final_output.get("analysis", ""),
                "tool_outputs": [str(src.content) for src in tool_outputs],
            }

        except Exception as e:
            return {"status": "error", "error": str(e)}

    def process_comic_request(self, user_prompt: str) -> Dict[str, Any]:
        """Synchronous version that runs the async version"""
        # asyncio is already imported at module level.
        return asyncio.run(self.process_comic_request_async(user_prompt))


def create_brown_workflow(
    max_iterations: int = 1, openai_api_key: Optional[str] = None
) -> BrownWorkflow:
    """
    Factory function to create and initialize Brown workflow

    Args:
        max_iterations: Requested refinement iterations (the wrapper pins
            this to 1 internally).
        openai_api_key: API key; falls back to the OPENAI_API_KEY env var.
    """
    workflow = BrownWorkflow(
        max_iterations=max_iterations,
        openai_api_key=openai_api_key,
    )
    return workflow


# def create_brown_workflow(
#     max_iterations: int = 3,
#     openai_api_key: Optional[str] = None,
# ) -> BrownFunctionCallingAgent:
#     """Create and initialize Brown workflow"""

#     workflow = BrownFunctionCallingAgent(
#         max_iterations=max_iterations,
#         openai_api_key=openai_api_key,
#     )
#     return workflow