# Hugging Face Spaces page-status artifact from scraping ("Spaces: Sleeping") —
# not part of the program.
# Standard library
import json
from typing import Iterator

# Third-party
import gradio as gr
import requests
class OllamaChat:
    """Minimal streaming client for a local Ollama chat endpoint.

    Sends the conversation to ``POST {base_url}/api/chat`` with
    ``stream=True`` and yields the accumulated assistant reply as chunks
    arrive.
    """

    def __init__(self, model_name: str = "llama2", base_url: str = "http://localhost:11434"):
        self.model_name = model_name
        self.base_url = base_url

    def generate_response(self, message: str, history: list = None) -> Iterator[str]:
        """Yield the assistant reply, growing as the server streams it.

        Args:
            message: The new user message.
            history: Optional list of ``(user_msg, assistant_msg)`` pairs
                from earlier turns; ``assistant_msg`` may be falsy for a
                turn that has no reply yet.

        Yields:
            The response text accumulated so far. On failure a single
            string starting with ``"[error]"`` is yielded instead.
        """
        try:
            messages = [{"role": "system", "content": "You are a helpful assistant. Please respond to the user queries."}]
            if history:
                for human_msg, ai_msg in history:
                    messages.append({"role": "user", "content": human_msg})
                    if ai_msg:
                        messages.append({"role": "assistant", "content": ai_msg})
            messages.append({"role": "user", "content": message})

            # Fix: use the response as a context manager so the streaming
            # connection is always closed — the original leaked the socket
            # when the caller stopped iterating early or an error occurred.
            with requests.post(
                f"{self.base_url}/api/chat",
                json={"model": self.model_name, "messages": messages, "stream": True},
                stream=True,
                timeout=60,
            ) as response:
                if response.status_code != 200:
                    yield f"[error] Failed to connect to Ollama server (Status: {response.status_code})"
                    return
                full_response = ""
                # Ollama streams one JSON object per line.
                for line in response.iter_lines():
                    if not line:
                        continue
                    try:
                        data = json.loads(line.decode('utf-8'))
                    except json.JSONDecodeError:
                        # Skip malformed/partial stream lines.
                        continue
                    if 'message' in data and 'content' in data['message']:
                        full_response += data['message']['content']
                        yield full_response
                    if data.get('done', False):
                        break
        except requests.exceptions.RequestException as e:
            yield f"[error] Connection issue: {str(e)}"
        except Exception as e:
            yield f"[error] Unexpected error: {str(e)}"
def create_chat_interface():
    """Build and return the Gradio Blocks UI wired to a local Ollama model.

    Returns:
        gr.Blocks: The assembled demo; call ``.launch()`` on it to serve.
    """
    ollama_chat = OllamaChat()

    def respond(message, history_state):
        """Stream (chatbot, textbox, state) updates for one user turn.

        Implemented as a generator so Gradio renders partial replies as
        they arrive instead of blocking until the stream finishes.
        """
        if not message.strip():
            # Nothing to send — leave every component unchanged.
            yield gr.update(), gr.update(), history_state
            return

        partial = ""
        for partial in ollama_chat.generate_response(message, history_state):
            if partial.startswith("[error]"):
                # Bug fix: the original consumed the error text and dropped
                # it, leaving the user with no feedback. Show the error in
                # the chat (without committing it to history) and keep the
                # user's text in the box so they can retry.
                yield (
                    gr.update(value=history_state + [(message, partial)]),
                    gr.update(value=message),
                    history_state,
                )
                return
            # Incremental display of the in-progress reply.
            yield gr.update(value=history_state + [(message, partial)]), gr.update(), history_state

        # Commit the finished turn and clear the input box.
        history_state.append((message, partial))
        yield gr.update(value=history_state), gr.update(value=""), history_state

    with gr.Blocks(title="LangChain Demo with Llama2", theme=gr.themes.Soft(), css="""
        .gr-block {
            max-width: 960px;
            margin: auto;
        }
        @media (max-width: 768px) {
            #chatbot { height: 300px !important; }
        }
    """) as demo:
        gr.Markdown("# 🦙 LangChain Demo with Llama2 API")
        gr.Markdown("Chat with Llama2 using LangChain and Ollama")

        # Per-session conversation history: list of (user, assistant) pairs.
        history_state = gr.State([])
        chatbot = gr.Chatbot(
            value=[],
            elem_id="chatbot",
            elem_classes="chatbot-box",
            bubble_full_width=False,
            height=500
        )
        with gr.Column():
            with gr.Row():
                msg = gr.Textbox(
                    placeholder="Enter your message here...",
                    container=False,
                    scale=6,
                    label="Your Message"
                )
                submit_btn = gr.Button("Send", scale=2, variant="primary")
                clear_btn = gr.Button("Clear", scale=2, variant="secondary")

        # Events
        msg.submit(respond, [msg, history_state], [chatbot, msg, history_state])
        submit_btn.click(respond, [msg, history_state], [chatbot, msg, history_state])
        clear_btn.click(lambda: ([], "", []), outputs=[chatbot, msg, history_state])

        gr.Examples(
            examples=[
                "What is artificial intelligence?",
                "Explain machine learning in simple terms",
                "Write a short poem about technology",
                "What are the benefits of renewable energy?"
            ],
            inputs=msg
        )
        gr.Markdown("""
        ### Instructions:
        1. Type your question in the text box above
        2. Click 'Send' or press Enter to get a response
        3. Use 'Clear' to reset the conversation
        **Note**: This demo requires Ollama to be running with the Llama2 model installed.
        """)
    return demo
if __name__ == "__main__":
    # Build the UI and serve it on all interfaces at port 7860, local only.
    app = create_chat_interface()
    app.launch(server_name="0.0.0.0", server_port=7860, share=False)