duatanzeel committed on
Commit
86d2e8a
·
verified ·
1 Parent(s): 1754784

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -19
app.py CHANGED
@@ -3,43 +3,37 @@ from transformers import pipeline
3
  from huggingface_hub import login
4
  import os
5
 
6
- # 🚨 Replace this with your actual Hugging Face access token
7
- HF_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")# <-- Insert your token here securely
8
-
9
- # Login to Hugging Face Hub
10
  login(HF_TOKEN)
11
 
12
- # App Title & Description
13
  st.title("πŸ€–πŸ“Ÿ Arduino Expert Chatbot")
14
- st.markdown("Get help with Arduino code, circuit diagrams, and projects.")
15
 
16
- # Load the model securely
17
  @st.cache_resource
18
  def load_model():
19
  try:
20
- return pipeline("text-generation", model="mistralai/Mixtral-8x7B-Instruct-v0.1", token=HF_TOKEN)
 
 
 
 
 
21
  except Exception as e:
22
- st.error("🚫 Failed to load the model. Using fallback model (GPT-2).")
23
- return pipeline("text-generation", model="gpt2") # fallback public model
24
 
25
- # Load model once
26
  model = load_model()
27
 
28
- # User Input
29
  query = st.text_area("Ask your Arduino question here πŸ‘‡", height=150)
30
 
31
- # Generate Answer
32
  if st.button("Get Answer"):
33
  if query.strip():
34
  with st.spinner("Thinking... πŸ€–"):
35
  try:
36
- response = model(query, max_length=512, do_sample=True, temperature=0.7)
 
37
  st.success(response[0]['generated_text'])
38
  except Exception as e:
39
  st.error(f"❌ Error generating response: {e}")
40
  else:
41
- st.warning("Please enter a question about your Arduino project.")
42
-
43
- # Footer
44
- st.markdown("---")
45
- st.markdown("Made with ❀️ using Hugging Face and Streamlit")
 
3
from huggingface_hub import login
import os

# Read the Hugging Face access token from the environment; never hard-code
# tokens in source. The hosting environment (e.g. a Space secret) is expected
# to inject HUGGINGFACEHUB_API_TOKEN.
HF_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")

# login(None) would fall back to an interactive prompt, which fails in a
# headless Streamlit app — only authenticate when a token is actually set.
# Without a token the gated Mixtral model will fail to load and load_model()
# falls back to the public GPT-2 model.
if HF_TOKEN:
    login(HF_TOKEN)

st.title("🤖📟 Arduino Expert Chatbot")
st.markdown("Ask anything about Arduino: code, circuits, projects!")
@st.cache_resource
def load_model():
    """Load the text-generation pipeline once per Streamlit session.

    Tries the gated Mixtral instruct model first (requires a valid HF token);
    on any failure falls back to the small public GPT-2 model so the app
    stays usable.

    Returns:
        A transformers "text-generation" pipeline.
    """
    try:
        return pipeline(
            "text-generation",
            model="mistralai/Mixtral-8x7B-Instruct-v0.1",
            token=HF_TOKEN,
        )
    except Exception as e:  # broad on purpose: any load failure → fallback
        # Surface the actual failure reason instead of silently discarding
        # the bound exception (the original caught `e` but never used it).
        st.error(f"❌ Failed to load Mixtral model ({e}). Falling back to GPT-2.")
        return pipeline("text-generation", model="gpt2")
24
 
 
25
# Interactive section: load the cached model, collect a question, and render
# the generated answer.
model = load_model()

query = st.text_area("Ask your Arduino question here 👇", height=150)

if st.button("Get Answer"):
    if query.strip():
        with st.spinner("Thinking... 🤖"):
            try:
                # Mixtral-Instruct chat template; for the GPT-2 fallback the
                # tags are harmless plain text.
                prompt = f"<s>[INST] {query} [/INST]"
                # return_full_text=False shows only the model's completion —
                # without it the pipeline echoes the [INST] prompt back into
                # generated_text and the user sees their own question again.
                response = model(
                    prompt,
                    max_new_tokens=512,
                    do_sample=True,
                    temperature=0.7,
                    return_full_text=False,
                )
                st.success(response[0]["generated_text"])
            except Exception as e:
                st.error(f"❌ Error generating response: {e}")
    else:
        st.warning("Please enter a valid question.")