# Arduino Expert Chatbot — Streamlit app (Hugging Face Space).
# NOTE(review): the original file carried scraped Spaces-page residue
# ("Sleeping" status, file size, commit hashes) above the code; replaced
# with this comment header so the file is valid Python.
import os

import streamlit as st
from huggingface_hub import login
from transformers import pipeline

# Hugging Face access token, configured as a Space secret. May legitimately
# be unset when only public, ungated models are used.
HF_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")

# Bug fix: login(None) raises when the secret is missing, crashing the app
# at import time — only authenticate when a token is actually configured.
if HF_TOKEN:
    login(HF_TOKEN)
# Page header. NOTE(review): the original title emoji were mojibake
# ("π€π" — UTF-8 emoji bytes misdecoded); restored to a plausible
# robot + plug pair — confirm against the deployed app.
st.title("🤖🔌 Arduino Expert Chatbot")
st.markdown("Ask anything about Arduino: code, circuits, projects!")
@st.cache_resource
def load_model():
    """Load the chat model once per server process (cached by Streamlit).

    Returns:
        A transformers ``text-generation`` pipeline for the primary model,
        or a small ``gpt2`` pipeline as a degraded fallback if the primary
        model cannot be loaded (missing token, gated repo, out of memory...).
    """
    try:
        # NOTE(review): the original comment called falcon-7b-instruct
        # "lightweight, non-gated" — it is a 7B-parameter model and access
        # typically requires a token; that comment was misleading and has
        # been removed.
        return pipeline(
            "text-generation",
            model="tiiuae/falcon-7b-instruct",
            token=HF_TOKEN,
        )
    except Exception as e:
        # Log server-side, surface in the UI, and degrade to a tiny local
        # model instead of crashing the app.
        print(f"Model load failed: {e}")
        st.error("❌ Failed to load model.")  # mojibake "β" restored to ❌
        return pipeline("text-generation", model="gpt2")
# Build the Q&A interface around the (cached) pipeline.
model = load_model()

query = st.text_area("Ask your Arduino question here 👇", height=150)

if st.button("Get Answer"):
    if query.strip():
        with st.spinner("Thinking... 🤔"):
            try:
                # NOTE(review): "<s>[INST] ... [/INST]" is the Llama/Mistral
                # chat template, not Falcon's — verify the target model
                # actually expects these tags.
                prompt = f"<s>[INST] {query} [/INST]"
                # Bug fix: text-generation pipelines return prompt +
                # continuation by default, so the UI was echoing the raw
                # prompt back to the user; return_full_text=False keeps
                # only the generated answer.
                response = model(
                    prompt,
                    max_new_tokens=512,
                    do_sample=True,
                    temperature=0.7,
                    return_full_text=False,
                )
                st.success(response[0]["generated_text"])
            except Exception as e:
                st.error(f"❌ Error generating response: {e}")
    else:
        st.warning("Please enter a valid question.")
|