"""Gradio web app for the Item Drying Assistant.

Wires a DryingAgent to a Gradio Blocks chat interface with image upload.
"""
import os
import gradio as gr
from PIL import Image
from typing import Tuple, Optional
from dotenv import load_dotenv
from .drying_agent import DryingAgent
# Load environment variables
load_dotenv()
class DryingApp:
    """Gradio front-end wrapping a DryingAgent in a chat + image UI."""

    def __init__(self):
        """Initialize the DryingApp with the agent and interface."""
        self.agent = DryingAgent()

    def process_interaction(
        self,
        message: str,
        image: Optional[Image.Image],
        history: list
    ) -> Tuple[list, Optional[Image.Image]]:
        """Process one user turn and update the chat history.

        Args:
            message: The user's text input; blank messages are ignored.
            image: Optional PIL image uploaded by the user.
            history: Existing chat history as (user, assistant) tuples.

        Returns:
            Tuple of (updated history, processed image or None).
        """
        if not message.strip():
            # Nothing to do for an empty/whitespace-only message.
            return history, None
        # Normalize history once, up front (used by both success and
        # error paths below).
        if not history:
            history = []
        try:
            # Get response from agent
            messages, processed_image = self.agent.process_message(message, image)
            # Safely extract the assistant's response; the agent is
            # expected to return [user_msg, assistant_msg] but may
            # return fewer entries.
            assistant_response = (
                messages[1]["content"]
                if len(messages) > 1
                else "I apologize, but I couldn't process your message."
            )
            # Format as tuples (user_message, assistant_message) for
            # Gradio 4.19.2 compatibility
            history.append((message, assistant_response))
            return history, processed_image
        except Exception as e:
            print(f"Error in process_interaction: {str(e)}")
            # BUG FIX: this path previously appended a {"role": ...}
            # dict, which is inconsistent with the tuple format used on
            # the success path and breaks the tuple-mode Chatbot. Use
            # the same (user, assistant) tuple shape here.
            history.append((message, f"Error: {str(e)}"))
            return history, None

    def reset_conversation(self):
        """Reset the conversation and agent state.

        Returns:
            Tuple of (empty history, None image) to clear the UI.
        """
        self.agent.reset()
        return [], None

    def create_interface(self):
        """Create and configure the Gradio Blocks interface.

        Returns:
            The assembled gr.Blocks interface (not yet launched).
        """
        with gr.Blocks(
            title="Item Drying Assistant",
            theme=gr.themes.Soft(
                primary_hue="blue",
                secondary_hue="gray"
            ),
            css=".gradio-container {max-width: 1200px; margin: auto;}"
        ) as interface:
            gr.Markdown("# Item Drying Assistant")
            gr.Markdown("Upload an image of a wet item and I'll help you dry it!")
            with gr.Row():
                # Left column: chat history plus message entry controls.
                with gr.Column(scale=2):
                    chatbot = gr.Chatbot(
                        label="Chat History",
                        height=400,
                        show_label=True,
                        layout="bubble",
                        rtl=False,
                        show_copy_button=True
                    )
                    with gr.Row():
                        message = gr.Textbox(
                            label="Your message",
                            placeholder="Type your message here...",
                            lines=2,
                            max_lines=10,
                            show_label=True,
                            container=True
                        )
                        submit = gr.Button("Send", variant="primary")
                        reset = gr.Button("Reset", variant="secondary")
                # Right column: image upload and processed-image preview.
                with gr.Column(scale=1):
                    image_input = gr.Image(
                        label="Upload Image",
                        type="pil",
                        show_label=True,
                        container=True,
                        height=300,
                        sources=["upload", "clipboard"]
                    )
                    image_output = gr.Image(
                        label="Processed Image",
                        type="pil",
                        show_label=True,
                        container=True,
                        height=300
                    )
            # Set up event handlers; the chained .then() clears the
            # message box after each successful submission.
            submit.click(
                fn=self.process_interaction,
                inputs=[message, image_input, chatbot],
                outputs=[chatbot, image_output],
                api_name="process"
            ).then(
                fn=lambda: "",
                outputs=[message]
            )
            message.submit(
                fn=self.process_interaction,
                inputs=[message, image_input, chatbot],
                outputs=[chatbot, image_output],
                api_name="process_enter"
            ).then(
                fn=lambda: "",
                outputs=[message]
            )
            reset.click(
                fn=self.reset_conversation,
                inputs=[],
                outputs=[chatbot, image_output],
                api_name="reset"
            )
        return interface
def main():
    """Entry point: build the Gradio interface and serve it on port 7860."""
    application = DryingApp()
    ui = application.create_interface()
    launch_options = {
        "server_name": "0.0.0.0",
        "server_port": 7860,
        "share": False,  # Disable sharing to avoid cross-origin issues
        "show_error": True,
        "allowed_paths": ["test_images"],  # Allow access to test images
        "quiet": True,  # Reduce console output
    }
    ui.launch(**launch_options)
# Run the app only when executed as a script (not on import).
# BUG FIX: removed a trailing " |" scrape artifact after main() that
# made this line a syntax error.
if __name__ == "__main__":
    main()