Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -3,43 +3,37 @@ from transformers import pipeline
|
|
| 3 |
from huggingface_hub import login
|
| 4 |
import os
|
| 5 |
|
| 6 |
-
|
| 7 |
-
HF_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")# <-- Insert your token here securely
|
| 8 |
-
|
| 9 |
-
# Login to Hugging Face Hub
|
| 10 |
login(HF_TOKEN)
|
| 11 |
|
| 12 |
-
# App Title & Description
|
| 13 |
st.title("π€π Arduino Expert Chatbot")
|
| 14 |
-
st.markdown("
|
| 15 |
|
| 16 |
-
# Load the model securely
|
| 17 |
@st.cache_resource
|
| 18 |
def load_model():
|
| 19 |
try:
|
| 20 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 21 |
except Exception as e:
|
| 22 |
-
st.error("
|
| 23 |
-
return pipeline("text-generation", model="gpt2")
|
| 24 |
|
| 25 |
-
# Load model once
|
| 26 |
model = load_model()
|
| 27 |
|
| 28 |
-
# User Input
|
| 29 |
query = st.text_area("Ask your Arduino question here π", height=150)
|
| 30 |
|
| 31 |
-
# Generate Answer
|
| 32 |
if st.button("Get Answer"):
|
| 33 |
if query.strip():
|
| 34 |
with st.spinner("Thinking... π€"):
|
| 35 |
try:
|
| 36 |
-
|
|
|
|
| 37 |
st.success(response[0]['generated_text'])
|
| 38 |
except Exception as e:
|
| 39 |
st.error(f"β Error generating response: {e}")
|
| 40 |
else:
|
| 41 |
-
st.warning("Please enter a
|
| 42 |
-
|
| 43 |
-
# Footer
|
| 44 |
-
st.markdown("---")
|
| 45 |
-
st.markdown("Made with β€οΈ using Hugging Face and Streamlit")
|
|
|
|
| 3 |
from huggingface_hub import login
import os

# Read the Hugging Face access token from the environment — never hard-coded.
HF_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")

# Only authenticate when a token is actually present: calling login(None)
# fails (it tries interactive auth) and would crash the app before the UI
# ever renders. Without a token the gated Mixtral repo will not load, but
# load_model() already falls back to GPT-2 in that case.
if HF_TOKEN:
    login(HF_TOKEN)

# App title & description.
# NOTE(review): "π€π" looks like mojibake for emoji in the original file —
# confirm intended characters against the deployed app.
st.title("π€π Arduino Expert Chatbot")
st.markdown("Ask anything about Arduino: code, circuits, projects!")
|
| 11 |
|
|
|
|
| 12 |
@st.cache_resource
def load_model():
    """Build the text-generation pipeline once per Streamlit session.

    Tries the Mixtral-8x7B instruct model first (a gated repo, so it needs
    the HF token); on any failure it reports the error in the UI and falls
    back to plain GPT-2 so the app stays usable.

    Returns:
        A transformers text-generation pipeline (Mixtral or GPT-2 fallback).
    """
    try:
        return pipeline(
            "text-generation",
            model="mistralai/Mixtral-8x7B-Instruct-v0.1",
            token=HF_TOKEN,
        )
    except Exception as e:
        # Surface the actual failure reason — the original caught `e` but
        # never reported it, which made auth/download errors undiagnosable.
        st.error(f"β Failed to load Mixtral model ({e}). Falling back to GPT-2.")
        return pipeline("text-generation", model="gpt2")
|
| 24 |
|
|
|
|
| 25 |
# Load the cached pipeline once (cheap after the first call thanks to
# st.cache_resource on load_model).
model = load_model()

# User input.
query = st.text_area("Ask your Arduino question here π", height=150)

# Generate an answer on demand.
if st.button("Get Answer"):
    if query.strip():
        with st.spinner("Thinking... π€"):
            try:
                # Mixtral-Instruct chat template; harmless plain text for
                # the GPT-2 fallback.
                prompt = f"<s>[INST] {query} [/INST]"
                # return_full_text=False stops the pipeline echoing the
                # prompt (including the [INST] markers) back into the
                # displayed answer — only the completion is shown.
                response = model(
                    prompt,
                    max_new_tokens=512,
                    do_sample=True,
                    temperature=0.7,
                    return_full_text=False,
                )
                st.success(response[0]['generated_text'])
            except Exception as e:
                st.error(f"β Error generating response: {e}")
    else:
        st.warning("Please enter a valid question.")
|
|
|
|
|
|
|
|
|
|
|
|