# Hugging Face Space: Arduino Expert Chatbot (Streamlit app)
import os

import streamlit as st
from huggingface_hub import login
from transformers import pipeline

# Hub token is optional: read it from the environment and only authenticate
# when it is actually set. The original called login(HF_TOKEN) unconditionally,
# which raises at startup when HUGGINGFACEHUB_API_TOKEN is not defined.
HF_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
if HF_TOKEN:
    login(HF_TOKEN)
# Page header.
# NOTE(review): the original title contained mojibake ("π€π" — UTF-8 emoji
# decoded with a wrong codec); restored to plausible originals — confirm
# against the intended artwork.
st.title("🤖🔌 Arduino Expert Chatbot")
st.markdown("Ask anything about Arduino: code, circuits, projects!")
@st.cache_resource(show_spinner="Loading model...")
def load_model():
    """Build and cache the text-generation pipeline.

    Returns:
        A transformers text-generation pipeline for
        ``tiiuae/falcon-7b-instruct``, or a ``gpt2`` pipeline as a
        best-effort fallback if the primary model fails to load.

    Decorated with ``st.cache_resource`` so the multi-GB model is loaded
    once per server process instead of on every Streamlit rerun — the
    original reloaded it on each script execution.
    """
    try:
        return pipeline(
            "text-generation",
            # NOTE(review): falcon-7b-instruct weighs ~14 GB — the original
            # "lightweight" comment was misleading. It is non-gated, though.
            model="tiiuae/falcon-7b-instruct",
            token=HF_TOKEN,
        )
    except Exception as e:
        # Keep the app usable: log the failure, tell the user, and serve a
        # small local fallback model instead of crashing.
        print(f"Model load failed: {e}")
        st.error("❌ Failed to load model.")
        return pipeline("text-generation", model="gpt2")
model = load_model()

query = st.text_area("Ask your Arduino question here 👇", height=150)

if st.button("Get Answer"):
    if query.strip():
        with st.spinner("Thinking... 🤖"):
            try:
                # NOTE(review): the original wrapped the query in
                # Mistral/Llama-style "<s>[INST] ... [/INST]" tags, which
                # Falcon-instruct was not trained on; a plain instruction
                # prompt matches its expected format.
                prompt = (
                    "You are an Arduino expert. Answer the question clearly.\n"
                    f"Question: {query}\nAnswer:"
                )
                response = model(
                    prompt,
                    max_new_tokens=512,
                    do_sample=True,
                    temperature=0.7,
                    # Don't echo the prompt back to the user — the original
                    # displayed the full prompt + completion.
                    return_full_text=False,
                )
                st.success(response[0]["generated_text"])
            except Exception as e:
                st.error(f"❌ Error generating response: {e}")
    else:
        st.warning("Please enter a valid question.")