# Arduino Expert Chatbot — Streamlit app (Hugging Face Space)
import os

import streamlit as st
from huggingface_hub import login
from transformers import pipeline

# 🚨 Never hard-code an access token in source — read it from the
# environment (e.g. a Spaces secret) so it is not committed to VCS.
HF_TOKEN = os.getenv("HF_TOKEN", "")

# Log in to the Hugging Face Hub only when a token is actually set;
# login() raises on an empty or placeholder token.
if HF_TOKEN:
    login(HF_TOKEN)
# App title & description (emoji restored from mojibake in the original;
# second title emoji is a best guess — original bytes were lost).
st.title("🤖🔌 Arduino Expert Chatbot")
st.markdown("Get help with Arduino code, circuit diagrams, and projects.")
# Load the model securely, caching it across Streamlit reruns.
@st.cache_resource
def load_model():
    """Return a text-generation pipeline, falling back to GPT-2 on failure.

    Decorated with st.cache_resource so the (large) model is loaded once
    per server process instead of on every script rerun.
    """
    try:
        return pipeline(
            "text-generation",
            model="mistralai/Mixtral-8x7B-Instruct-v0.1",
            token=HF_TOKEN,
        )
    except Exception:
        # Gated/large model may be unavailable (no access, bad token, no
        # hardware) — degrade to a small public model instead of crashing.
        st.error("🚫 Failed to load the model. Using fallback model (GPT-2).")
        return pipeline("text-generation", model="gpt2")  # fallback public model


# Load model once (cached by st.cache_resource above)
model = load_model()
# User input
query = st.text_area("Ask your Arduino question here 👇", height=150)

# Generate an answer on demand
if st.button("Get Answer"):
    if query.strip():
        with st.spinner("Thinking... 🤔"):
            try:
                # max_new_tokens bounds only the *generated* text; the
                # deprecated max_length also counts the prompt and can
                # silently truncate long questions.
                response = model(
                    query,
                    max_new_tokens=512,
                    do_sample=True,
                    temperature=0.7,
                )
                st.success(response[0]["generated_text"])
            except Exception as e:
                st.error(f"❌ Error generating response: {e}")
    else:
        st.warning("Please enter a question about your Arduino project.")
# Footer (❤️ restored from mojibake in the original paste)
st.markdown("---")
st.markdown("Made with ❤️ using Hugging Face and Streamlit")