import os
import re

import gradio as gr
from huggingface_hub import InferenceClient

# Hugging Face Inference client for the DeepSeek R1 distilled model.
# Requires a valid HF_TOKEN in the environment.
client = InferenceClient(
    model="deepseek-ai/DeepSeek-R1-0528-Qwen3-8B",
    token=os.getenv("HF_TOKEN"),
)

# Preset system prompts selectable in the UI; "Custom" reveals a free-form textbox.
SYSTEM_PROMPTS = {
    "Default Assistant": "You are a helpful, harmless, and honest AI assistant. Provide clear, accurate, and thoughtful responses.",
    "Creative Writer": "You are a creative writing assistant. Help users with storytelling, poetry, and imaginative content. Be expressive and artistic.",
    "Code Helper": "You are an expert programmer. Help users write, debug, and understand code. Provide clear explanations and best practices.",
    "Socratic Teacher": "You are a Socratic teacher. Instead of giving direct answers, guide users to discover answers through thoughtful questions.",
    "Friendly Chat": "You are a friendly conversational partner. Be warm, engaging, and personable. Use casual language and show genuine interest.",
    "Custom": "",
}

# Pre-compiled patterns for the model's reasoning markup and its rendered form.
# NOTE(review): the literal <think>/<details> tags were stripped from the
# original source by HTML-sanitizing tooling; reconstructed here — confirm
# against the model's actual reasoning-tag format.
_THINK_BLOCK_RE = re.compile(r"<think>.*?</think>", re.DOTALL)
_DETAILS_BLOCK_RE = re.compile(r"<details>.*?</details>", re.DOTALL)


def format_thinking(content):
    """Render <think>...</think> spans as collapsible <details> sections.

    Text outside the tags passes through unchanged. An unterminated
    <think> span (still streaming) is emitted as-is until the closing
    tag arrives.
    """
    if "<think>" not in content:
        return content
    parts = content.split("<think>")
    formatted = parts[0]
    for part in parts[1:]:
        if "</think>" in part:
            think_content, rest = part.split("</think>", 1)
            formatted += (
                "\n\n<details>\n<summary>💭 Thinking Process</summary>\n\n"
                f"{think_content.strip()}\n\n</details>\n\n{rest}"
            )
        else:
            # Closing tag not streamed yet; keep the raw text.
            formatted += part
    return formatted


def chat(message, history, system_prompt_choice, custom_system_prompt,
         temperature, max_tokens, top_p, show_thinking):
    """Stream an assistant reply for *message* given messages-format *history*.

    Yields progressively longer display strings (a Gradio generator
    callback). On API failure, yields a single error message instead of
    raising, so the UI stays responsive.
    """
    # Resolve the effective system prompt, falling back to the default
    # when the custom box is blank or the choice is unknown.
    if system_prompt_choice == "Custom":
        system_content = (custom_system_prompt if custom_system_prompt.strip()
                          else SYSTEM_PROMPTS["Default Assistant"])
    else:
        system_content = SYSTEM_PROMPTS.get(
            system_prompt_choice, SYSTEM_PROMPTS["Default Assistant"])

    messages = [{"role": "system", "content": system_content}]

    # Replay prior turns, stripping the rendered <details> blocks that
    # format_thinking() added, so the API never sees display-only markup.
    for msg in history:
        if msg["role"] == "user":
            messages.append({"role": "user", "content": msg["content"]})
        elif msg["role"] == "assistant":
            content = msg["content"]
            if "<details>" in content:
                content = _DETAILS_BLOCK_RE.sub("", content)
            messages.append({"role": "assistant", "content": content.strip()})

    messages.append({"role": "user", "content": message})

    try:
        response = ""
        stream = client.chat_completion(
            messages=messages,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            stream=True,
        )
        for chunk in stream:
            if chunk.choices[0].delta.content:
                response += chunk.choices[0].delta.content
                if show_thinking:
                    yield format_thinking(response)
                else:
                    display_response = response
                    if "</think>" in display_response:
                        # Reasoning finished: drop the whole think block.
                        display_response = _THINK_BLOCK_RE.sub("", display_response)
                    elif "<think>" in display_response:
                        # Still inside an open reasoning span: placeholder.
                        display_response = "🤔 *Thinking...*"
                    # A response with no think tags at all streams through
                    # unmodified (the original collapsed this case into the
                    # placeholder branch).
                    yield display_response.strip()
    except Exception as e:
        yield f"❌ Error: {str(e)}\n\nPlease check your HF_TOKEN and try again."


def clear_chat():
    """Reset the chatbot history and the message input box."""
    return [], ""
def export_chat(history):
    """Serialize messages-format *history* to a markdown transcript string."""
    if not history:
        return "No chat history to export."
    export_text = "# Chat Export\n\n"
    for msg in history:
        role = "👤 User" if msg["role"] == "user" else "🤖 Assistant"
        export_text += f"## {role}\n{msg['content']}\n\n---\n\n"
    return export_text


# Custom CSS for the header banner, settings panel, and footer.
css = """
.header-container {
    text-align: center;
    padding: 20px;
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    border-radius: 12px;
    margin-bottom: 20px;
}
.header-container h1 {
    color: white;
    margin: 0;
    font-size: 2em;
}
.header-container p {
    color: rgba(255,255,255,0.9);
    margin: 10px 0 0 0;
}
.header-container a {
    color: #ffd700;
    text-decoration: none;
    font-weight: bold;
}
.header-container a:hover {
    text-decoration: underline;
}
.parameter-box {
    background: var(--background-fill-secondary);
    padding: 15px;
    border-radius: 8px;
    margin-top: 10px;
}
.chatbot-container {
    min-height: 500px;
}
footer {
    text-align: center;
    margin-top: 20px;
    padding: 10px;
    color: var(--body-text-color-subdued);
}
"""

# Build the interface
with gr.Blocks(
    title="DeepSeek R1 Chatbot",
    theme=gr.themes.Soft(),
    css=css,
    fill_height=True,
    # NOTE(review): footer_links is not accepted by older Gradio releases —
    # confirm the deployed Gradio version supports this keyword.
    footer_links=[
        {"label": "Built with anycoder", "url": "https://huggingface.co/spaces/akhaliq/anycoder"},
        {"label": "Model", "url": "https://huggingface.co/deepseek-ai/DeepSeek-R1-0528-Qwen3-8B"},
    ],
) as demo:
    # Header banner. NOTE(review): the original markup was stripped by
    # HTML-sanitizing tooling; reconstructed to match the .header-container
    # CSS rules above — confirm against the intended layout.
    gr.HTML("""
    <div class="header-container">
        <h1>🧠 DeepSeek R1 Chatbot</h1>
        <p>Powered by DeepSeek-R1-0528-Qwen3-8B with reasoning capabilities</p>
        <p><a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank">Built with anycoder</a></p>
    </div>
    """)

    with gr.Row():
        # Main chat column
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(
                label="Chat",
                height=500,
                type="messages",
                show_copy_button=True,
                avatar_images=(None, "https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.svg"),
                render_markdown=True,
                elem_classes=["chatbot-container"],
            )
            with gr.Row():
                msg = gr.Textbox(
                    placeholder="Type your message here... (Press Enter to send)",
                    label="Message",
                    scale=4,
                    lines=2,
                    max_lines=5,
                    autofocus=True,
                )
                send_btn = gr.Button("Send 📤", variant="primary", scale=1)
            with gr.Row():
                clear_btn = gr.Button("🗑️ Clear Chat", variant="secondary")
                regenerate_btn = gr.Button("🔄 Regenerate", variant="secondary")
                export_btn = gr.Button("📥 Export", variant="secondary")

        # Settings sidebar
        with gr.Column(scale=1):
            gr.Markdown("### ⚙️ Settings")

            with gr.Accordion("System Prompt", open=True):
                system_prompt_choice = gr.Dropdown(
                    choices=list(SYSTEM_PROMPTS.keys()),
                    value="Default Assistant",
                    label="Preset Prompts",
                    interactive=True,
                )
                # Hidden unless the "Custom" preset is selected.
                custom_system_prompt = gr.Textbox(
                    label="Custom System Prompt",
                    placeholder="Enter your custom system prompt here...",
                    lines=4,
                    visible=False,
                )

            with gr.Accordion("Generation Parameters", open=False):
                temperature = gr.Slider(
                    minimum=0.0, maximum=2.0, value=0.7, step=0.1,
                    label="Temperature",
                    info="Higher = more creative, Lower = more focused",
                )
                max_tokens = gr.Slider(
                    minimum=64, maximum=4096, value=1024, step=64,
                    label="Max Tokens",
                    info="Maximum response length",
                )
                top_p = gr.Slider(
                    minimum=0.0, maximum=1.0, value=0.9, step=0.05,
                    label="Top P",
                    info="Nucleus sampling parameter",
                )

            with gr.Accordion("Display Options", open=False):
                show_thinking = gr.Checkbox(
                    value=True,
                    label="Show Thinking Process",
                    info="Display the model's reasoning steps",
                )

            # Hidden until the user exports a transcript.
            export_output = gr.Textbox(
                label="Exported Chat",
                lines=10,
                visible=False,
                show_copy_button=True,
            )

    gr.Markdown("### 💡 Example Prompts")
    gr.Examples(
        examples=[
            ["Explain quantum computing in simple terms"],
            ["Write a haiku about artificial intelligence"],
            ["What's the time complexity of quicksort and why?"],
            ["Help me brainstorm ideas for a sustainable business"],
            ["Solve this step by step: If 3x + 7 = 22, what is x?"],
        ],
        inputs=msg,
        label="",
    )

    # ---- Event handlers ----------------------------------------------------

    def toggle_custom_prompt(choice):
        """Show the custom-prompt textbox only when 'Custom' is selected."""
        return gr.Textbox(visible=(choice == "Custom"))

    system_prompt_choice.change(
        toggle_custom_prompt,
        inputs=[system_prompt_choice],
        outputs=[custom_system_prompt],
    )

    def user_message(message, history):
        """Append the user's message to history and clear the input box."""
        if message.strip():
            history.append({"role": "user", "content": message})
        return "", history

    def bot_response(history, system_prompt_choice, custom_system_prompt,
                     temperature, max_tokens, top_p, show_thinking):
        """Stream the assistant's reply into a fresh trailing history slot."""
        if not history:
            yield history
            return
        user_msg = history[-1]["content"]
        history_for_api = history[:-1]
        history.append({"role": "assistant", "content": ""})
        for response in chat(user_msg, history_for_api, system_prompt_choice,
                             custom_system_prompt, temperature, max_tokens,
                             top_p, show_thinking):
            history[-1]["content"] = response
            yield history

    def regenerate(history, system_prompt_choice, custom_system_prompt,
                   temperature, max_tokens, top_p, show_thinking):
        """Drop the last assistant turn and stream a fresh reply in its place."""
        # Guard on the role so a trailing *user* message is never discarded
        # (the original checked only the history length).
        if len(history) >= 2 and history[-1]["role"] == "assistant":
            history = history[:-1]
            user_msg = history[-1]["content"]
            history_for_api = history[:-1]
            history.append({"role": "assistant", "content": ""})
            for response in chat(user_msg, history_for_api,
                                 system_prompt_choice, custom_system_prompt,
                                 temperature, max_tokens, top_p,
                                 show_thinking):
                history[-1]["content"] = response
                yield history
        else:
            yield history

    def show_export(history):
        """Reveal the export textbox, filled with the serialized transcript."""
        return gr.Textbox(visible=True, value=export_chat(history))

    # ---- Wire up events ----------------------------------------------------

    msg.submit(
        user_message,
        inputs=[msg, chatbot],
        outputs=[msg, chatbot],
        queue=False,
    ).then(
        bot_response,
        inputs=[chatbot, system_prompt_choice, custom_system_prompt,
                temperature, max_tokens, top_p, show_thinking],
        outputs=[chatbot],
    )

    send_btn.click(
        user_message,
        inputs=[msg, chatbot],
        outputs=[msg, chatbot],
        queue=False,
    ).then(
        bot_response,
        inputs=[chatbot, system_prompt_choice, custom_system_prompt,
                temperature, max_tokens, top_p, show_thinking],
        outputs=[chatbot],
    )

    clear_btn.click(clear_chat, outputs=[chatbot, msg])

    regenerate_btn.click(
        regenerate,
        inputs=[chatbot, system_prompt_choice, custom_system_prompt,
                temperature, max_tokens, top_p, show_thinking],
        outputs=[chatbot],
    )

    export_btn.click(show_export, inputs=[chatbot], outputs=[export_output])


if __name__ == "__main__":
    demo.launch()