# Gradio image-chat demo served on Hugging Face Spaces, backed by the HF inference router.
import base64
import mimetypes
import os

import gradio as gr
from openai import OpenAI
# OpenAI-compatible client pointed at the Hugging Face inference router.
# Requires the HF_TOKEN environment variable (raises KeyError if unset);
# the X-HF-Bill-To header attributes usage to the "huggingface" org.
client = OpenAI(
    base_url="https://router.huggingface.co/v1",
    api_key=os.environ["HF_TOKEN"],
    default_headers={"X-HF-Bill-To": "huggingface"},
)
def _image_to_data_url(path):
    """Read a local image file and return it as a base64 ``data:`` URL."""
    # Fall back to a generic image type when the extension is unknown.
    mime = mimetypes.guess_type(path)[0] or "image/png"
    with open(path, "rb") as f:
        encoded = base64.b64encode(f.read()).decode("ascii")
    return f"data:{mime};base64,{encoded}"


def process_message(message, history, image):
    """
    Stream a chat completion for the user's message and optional image.

    Args:
        message: Text typed by the user (may be empty when only an image
            is submitted).
        history: Prior conversation in Gradio "messages" format — a list
            of ``{"role": ..., "content": ...}`` dicts.
        image: Local filepath of an uploaded image, or ``None``.

    Yields:
        Updated history lists as streamed chunks arrive so the Chatbot
        component refreshes incrementally.
    """
    # Rebuild the conversation for the API from the Gradio history.
    messages = []
    for msg_dict in history:
        if msg_dict["role"] in ("user", "assistant"):
            messages.append({"role": msg_dict["role"], "content": msg_dict["content"]})

    # Append the current turn. Content is a list so that text and image
    # parts can be combined in a single user message.
    if message or image:
        current_message = {"role": "user", "content": []}
        if message:
            current_message["content"].append({"type": "text", "text": message})
        if image:
            # The router API cannot fetch a local file path, so inline the
            # image as a base64 data URL instead of passing the raw path.
            current_message["content"].append({
                "type": "image_url",
                "image_url": {"url": _image_to_data_url(image)},
            })
        messages.append(current_message)

    # Show the user's message in the chat immediately.
    new_history = history + [{"role": "user", "content": message if message else ""}]

    response = ""
    try:
        stream = client.chat.completions.create(
            model="zai-org/GLM-4.6V-Flash:zai-org",
            messages=messages,
            stream=True,
        )
        # Placeholder assistant turn, filled in as chunks arrive.
        new_history.append({"role": "assistant", "content": ""})
        for chunk in stream:
            if chunk.choices[0].delta.content:
                response += chunk.choices[0].delta.content
                new_history[-1]["content"] = response
                yield new_history
    except Exception as e:
        # Report the error as an assistant turn. Do NOT blindly overwrite
        # new_history[-1]: if create() raised before the placeholder was
        # appended, the last entry is the *user* message.
        error_msg = f"Error: {str(e)}"
        if new_history and new_history[-1]["role"] == "assistant":
            new_history[-1]["content"] = error_msg
        else:
            new_history.append({"role": "assistant", "content": error_msg})
        yield new_history
# Build the Gradio interface. The theme must be passed to gr.Blocks();
# it is not a valid argument to demo.launch().
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue")) as demo:
    # Attribution link (rendered as raw HTML inside Markdown).
    gr.Markdown(
        '<a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank" style="text-decoration: none;">'
        '<span style="color: #4F46E5; font-weight: bold;">Built with anycoder</span>'
        '</a>'
    )
    # type="messages" matches the dict-format history that process_message
    # produces and consumes ({"role": ..., "content": ...}).
    chatbot = gr.Chatbot(
        label="Conversation",
        type="messages",
        height=400,
        avatar_images=(
            "https://cdn-icons-png.flaticon.com/512/147/147144.png",
            "https://cdn-icons-png.flaticon.com/512/4712/4712025.png",
        ),
    )
    with gr.Row():
        with gr.Column(scale=3):
            msg = gr.Textbox(
                label="Your message",
                placeholder="Type your message here...",
                lines=2,
            )
        with gr.Column(scale=1):
            img = gr.Image(
                label="Upload image",
                type="filepath",
                height=150,
            )
    with gr.Row():
        submit_btn = gr.Button("Send", variant="primary")
        clear_btn = gr.ClearButton(components=[msg, img, chatbot], value="Clear Chat")

    # Thin wrapper so both event handlers share one entry point.
    def submit_message(message, history, image):
        yield from process_message(message, history, image)

    # NOTE(review): the original passed api_visibility="public" to the event
    # listeners; Gradio's event API has no such keyword (it would raise a
    # TypeError at build time), and endpoints are public by default — dropped.
    msg.submit(
        fn=submit_message,
        inputs=[msg, chatbot, img],
        outputs=[chatbot],
    ).then(
        lambda: ("", None),
        outputs=[msg, img],
    )
    submit_btn.click(
        fn=submit_message,
        inputs=[msg, chatbot, img],
        outputs=[chatbot],
    ).then(
        lambda: ("", None),
        outputs=[msg, img],
    )
# Launch the app when run as a script. demo.launch() accepts neither
# `theme` (set on gr.Blocks instead) nor `footer_links` (no such parameter);
# passing them would raise TypeError at startup, so both are removed.
if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",  # listen on all interfaces (required in Spaces)
        server_port=7860,       # standard Hugging Face Spaces port
        share=False,
    )