import os
import uuid

import streamlit as st
from streamlit_chat import message

# HuggingFace integration
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace

# Core LangChain schema
from langchain_core.messages import (
    AIMessage,
    HumanMessage,
    SystemMessage,
    BaseMessage
)

# --- 1. Load Environment Variables ---
HF_API_TOKEN = os.environ.get("HUGGINGFACEHUB_API_TOKEN")

# --- 2. Global System Prompt (Chef Personality) ---
SYSTEM_PROMPT = SystemMessage(
    content=(
        "You are Kitchen Buddy 👨‍🍳, a warm and friendly culinary assistant. "
        "Your job is to help people with anything related to food, cooking, or cuisine.\n\n"
        "You can:\n"
        "- Explain what ingredients are, their uses, and their cultural background\n"
        "- Suggest recipes and meal ideas\n"
        "- Offer ingredient substitutions\n"
        "- Teach cooking techniques and science\n"
        "- Provide healthy diet adaptations\n"
        "- Explore global cuisines & traditions\n\n"
        "Keep your tone helpful and approachable. "
        "If a user asks about a food item (e.g., 'what are apples'), explain what it is and how it's commonly used. "
        "If they ask what to cook with it, suggest a few recipes. "
        "If something is unrelated to food or cooking, politely redirect back to culinary topics."
    )
)

# --- 3. Streamlit UI Setup ---
st.set_page_config(page_title="Kitchen Buddy 👨‍🍳", layout="centered")
st.title("👨‍🍳 Kitchen Buddy")

# --- 4. Available Models (Easily extensible) ---
available_models = {
    "Mistral-7B-Instruct-v0.2": "mistralai/Mistral-7B-Instruct-v0.2",  # Default
    "Llama-2-7B-Chat": "meta-llama/Llama-2-7b-chat-hf",                # Requires HF approval
    "Qwen1.5-7B-Chat": "Qwen/Qwen1.5-7B-Chat",                         # Strong performer, free
    # Add more here as needed
}

# --- 5. Session State for Model Selection ---
if 'selected_model_key' not in st.session_state:
    st.session_state.selected_model_key = "Mistral-7B-Instruct-v0.2"  # Default

# --- 6. Initialise LLM (Dynamic based on selection) ---
@st.cache_resource
def initialize_llm(repo_id):
    """Build a ChatHuggingFace model for the given repo; cached per repo_id."""
    if not HF_API_TOKEN:
        return None
    os.environ['HUGGINGFACEHUB_API_TOKEN'] = HF_API_TOKEN
    try:
        llm = HuggingFaceEndpoint(
            repo_id=repo_id,
            task="text-generation",
            max_new_tokens=512,
            temperature=0.7,
            do_sample=True,
            repetition_penalty=1.03
        )
        chat_model = ChatHuggingFace(llm=llm)
        return chat_model
    except Exception as e:
        st.error(f"❌ Failed to initialize {repo_id}. Check API key and model availability.")
        print(f"Detailed LLM init error for {repo_id}: {e}")
        return None


def get_current_repo_id():
    return available_models.get(
        st.session_state.selected_model_key,
        available_models["Mistral-7B-Instruct-v0.2"]
    )


# Initialize the model (will be cached per repo_id)
CHAT_MODEL = initialize_llm(get_current_repo_id())

# Display the current model under the title
current_model_display = st.session_state.selected_model_key
st.markdown(f"""
Your friendly culinary assistant — ask about recipes, ingredients, and cooking techniques.

**🤖 Model in use:** `{current_model_display}`
""")

# Warn if model failed to load
if CHAT_MODEL is None:
    st.warning("⚠️ Model initialization failed. Please check your HF API token and try reloading.")
# --- 7. Session State for Multiple Chats ---
def new_chat():
    new_id = str(uuid.uuid4())
    st.session_state.chats[new_id] = [SYSTEM_PROMPT]
    st.session_state.chat_titles[new_id] = "New Chat"
    st.session_state.current_chat_id = new_id


if 'chats' not in st.session_state:
    st.session_state.chats = {}
    st.session_state.chat_titles = {}
    new_chat()
if 'current_chat_id' not in st.session_state:
    new_chat()
if 'generate_next' not in st.session_state:
    st.session_state.generate_next = False


def get_current_messages() -> list[BaseMessage]:
    return st.session_state.chats.get(st.session_state.current_chat_id, [SYSTEM_PROMPT])


def set_current_chat(chat_id):
    st.session_state.current_chat_id = chat_id


def convert_to_streamlit_message(msg: BaseMessage):
    if isinstance(msg, SystemMessage):
        return None, None
    role = "user" if isinstance(msg, HumanMessage) else "assistant"
    return msg.content, role


# --- 8. Sidebar Chat History & Model Selector ---
with st.sidebar:
    # Model Selection Section
    st.subheader("🤖 Model Selector")
    selected_key = st.selectbox(
        "Choose a model:",
        options=list(available_models.keys()),
        index=list(available_models.keys()).index(st.session_state.selected_model_key),
        key="model_selector"
    )
    if selected_key != st.session_state.selected_model_key:
        st.session_state.selected_model_key = selected_key
        # Clear cache to force reinitialization on rerun
        st.cache_resource.clear()
        st.success(f"✅ Switched to {selected_key}. Reloading model...")
        st.rerun()

    if st.button("🔄 Reload Current Model", use_container_width=True):
        st.cache_resource.clear()
        st.success("✅ Reloading model...")
        st.rerun()

    # Check if any chat has more than just the system prompt
    has_real_chats = any(
        len(history) > 1 for history in st.session_state.chats.values()
    )

    if not has_real_chats:
        # Show a disabled placeholder instead of the New Chat button
        st.button("📭 No saved conversations yet", use_container_width=True, disabled=True)
        st.markdown("""
### 👨‍🍳 Welcome! Ask me anything about cooking:
- Recipes and ideas
- Ingredient substitutions
- Cooking techniques

*Try asking:*
• "What can I make with apples?"
• "How do I cook pasta al dente?"
""")
    else:
        # Always show New Chat button if chats exist
        if st.button("🟥 New Chat", use_container_width=True):
            new_chat()
            st.rerun()

        st.markdown("---")
        st.subheader("📜 Chat History")

        # List past chats (use list() to avoid runtime modification error)
        for chat_id, title in list(st.session_state.chat_titles.items()):
            if len(st.session_state.chats.get(chat_id, [SYSTEM_PROMPT])) > 1:
                display_title = title
                is_current = chat_id == st.session_state.current_chat_id
                if st.button(
                    display_title,
                    key=f"chat_switch_{chat_id}",
                    type="primary" if is_current else "secondary",
                    use_container_width=True
                ):
                    set_current_chat(chat_id)
                    st.rerun()

# --- 9. Main App Execution ---
# Step 1: Capture user prompt
# --- Conversation Logic: show the user message immediately, then generate the bot response ---
if prompt := st.chat_input("Ask about a recipe, technique, or substitution..."):
    if CHAT_MODEL is None:
        st.session_state.chats[st.session_state.current_chat_id].append(HumanMessage(content=prompt))
        st.session_state.chats[st.session_state.current_chat_id].append(
            AIMessage(content="Error: Model is not initialized. Check API key setup.")
        )
        st.rerun()
    # 1. Append user message to the current chat history
    st.session_state.chats[st.session_state.current_chat_id].append(HumanMessage(content=prompt))

    # --- DOMAIN FILTER: Only allow culinary-related queries ---
    culinary_keywords = [
        # General
        "cook", "cooking", "kitchen", "chef", "meal", "food", "dish", "recipe",
        "cuisine", "menu", "flavor", "taste",
        # Ingredients
        "ingredient", "spice", "herb", "oil", "salt", "pepper", "garlic", "onion",
        "tomato", "butter", "cheese", "meat", "beef", "pork", "chicken", "lamb",
        "fish", "seafood", "shrimp", "crab", "lobster", "vegetable", "fruit",
        "grain", "rice", "pasta", "bread", "noodles", "beans", "tofu", "egg",
        # Techniques
        "bake", "roast", "grill", "barbecue", "bbq", "fry", "deep fry", "saute",
        "sauté", "boil", "steam", "poach", "simmer", "stew", "braise", "marinate",
        "blend", "chop", "slice", "dice", "whisk", "knead", "ferment",
        # Dishes
        "soup", "salad", "sandwich", "burger", "pizza", "pasta", "stew", "curry",
        "sauce", "stir fry", "omelette", "dessert", "cake", "cookie", "pie",
        "pastry", "bread", "tart", "pudding", "ice cream",
        # Cuisines
        "italian", "french", "spanish", "greek", "mediterranean", "japanese",
        "chinese", "korean", "thai", "vietnamese", "indian", "mexican", "latin",
        "filipino", "turkish", "middle eastern", "moroccan",
        # Diets & health
        "vegan", "vegetarian", "gluten-free", "keto", "paleo", "halal", "kosher",
        "low-carb", "low-fat",
        # Beverages
        "coffee", "tea", "smoothie", "wine", "cocktail", "beer", "drink", "juice",
        # Seasonal & Events
        "thanksgiving", "christmas", "new year", "ramadan", "eid", "hanukkah",
        "valentine", "birthday", "party",
        # Advanced Techniques
        "sous vide", "confit", "smoking", "curing", "pickling", "plating",
        "molecular gastronomy",
        # Professional Culinary Terms
        "mise en place", "umami", "maillard reaction", "deglaçage", "roux",
        "stock", "broth",
        # Specialty Ingredients
        "truffle", "saffron", "caviar", "foie gras", "kimchi", "kombu", "nori",
        "tamarind", "matcha", "miso",
        # Dietary Preferences
        "diabetic-friendly", "heart-healthy", "organic", "sustainable", "farm-to-table",
        # Equipment
        "blender", "mixer", "pressure cooker", "air fryer", "cast iron", "oven",
        "microwave", "thermometer"
    ]

    # Common cooking question patterns
    culinary_phrases = [
        "what can i make with", "how do i cook", "how to cook", "how to make",
        "substitute for", "what is", "uses of"
    ]

    prompt_lower = prompt.lower()

    # Check both keywords and phrases
    is_culinary = (
        any(word in prompt_lower for word in culinary_keywords) or
        any(phrase in prompt_lower for phrase in culinary_phrases)
    )

    if not is_culinary:
        restriction_msg = AIMessage(
            content=(
                "⚠️ I can only answer questions about cooking, recipes, ingredients, "
                "or culinary techniques. Please ask something food-related."
            )
        )
        st.session_state.chats[st.session_state.current_chat_id].append(restriction_msg)
        st.rerun()
    else:
        st.session_state.generate_next = True
        st.rerun()

# Step 2: Get current messages
messages = get_current_messages()

# --- Render chat history first ---
if len(messages) == 1 and isinstance(messages[0], SystemMessage):
    message("Start the conversation by typing your first culinary question below!", key="welcome_bubble")

for i in range(1, len(messages)):
    content, role = convert_to_streamlit_message(messages[i])
    if not content:
        continue
    if role == "assistant":
        if "recipe" in content.lower():
            message(f"👨‍🍳 **Chef's Recipe:**\n\n{content}", key=f"chat_ai_{i}")
        else:
            message(f"👨‍🍳 {content}", key=f"chat_ai_{i}")
    elif role == "user":
        message(content, is_user=True, key=f"chat_user_{i}")

# --- Now show spinner / generate new response ---
if st.session_state.generate_next:
    st.session_state.generate_next = False
    full_history = get_current_messages()

    # Spinner will show at the bottom of the chat
    with st.spinner("👨‍🍳 Our culinary expert is crafting your response..."):
        try:
            ai_message: AIMessage = CHAT_MODEL.invoke(full_history)

            # Rename an untitled chat using the latest user message. Do this
            # before appending the AI reply: full_history aliases the session
            # list, so after the append its last entry would be the AI message.
            if st.session_state.chat_titles[st.session_state.current_chat_id] == "New Chat":
                st.session_state.chat_titles[st.session_state.current_chat_id] = full_history[-1].content[:30] + "..."

            st.session_state.chats[st.session_state.current_chat_id].append(ai_message)
        except Exception as e:
            error_message = "I'm sorry, I encountered a brief issue while preparing the answer. Please try again."
            st.session_state.chats[st.session_state.current_chat_id].append(AIMessage(content=error_message))
            print(f"Full LLM invocation error: {e}")
    st.rerun()
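# --- Usage note (a minimal sketch, not part of the app logic) ---
# Assumptions: the script file name "kitchen_buddy.py" and the token value are
# placeholders; the pip package names below correspond to the imports used above.
#
#   pip install streamlit streamlit-chat langchain-huggingface langchain-core
#   export HUGGINGFACEHUB_API_TOKEN="<your Hugging Face token>"
#   streamlit run kitchen_buddy.py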