ammoncoder123 committed on
Commit
ea7bda1
·
verified ·
1 Parent(s): e462c60

Update chatbot.py

Browse files
Files changed (1) hide show
  1. chatbot.py +12 -21
chatbot.py CHANGED
@@ -4,16 +4,16 @@ import torch
4
  from huggingface_hub import login
5
  import os
6
 
7
- # Force authentication
 
8
  hf_token = os.getenv("HF_TOKEN")
9
  if hf_token:
10
  try:
11
- # This confirms your identity to HF
12
  login(token=hf_token)
13
  except Exception:
14
- # If network is down, we just skip login.
15
- # If the model is public, it will still load.
16
  pass
 
17
  # ================= CACHE THE MODEL =================
18
  @st.cache_resource
19
  def load_model():
@@ -25,7 +25,6 @@ def load_model():
25
  )
26
 
27
  tokenizer = AutoTokenizer.from_pretrained(model_id)
28
- # Ensure tokenizer has a chat template, or use a default one
29
  if tokenizer.chat_template is None:
30
  tokenizer.chat_template = "{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '</s>\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% endif %}"
31
 
@@ -37,36 +36,31 @@ def load_model():
37
  trust_remote_code=True
38
  )
39
 
40
- return pipeline(
41
- "text-generation",
42
- model=model,
43
- tokenizer=tokenizer
44
- )
45
 
46
  pipe = load_model()
47
 
48
  # ==================== CHAT INTERFACE ====================
49
- st.title("IPT Chatbot Assistance")
50
- st.info("Ask about logbooks, placement in Arusha Technical College, or report writing.")
51
 
52
  if "messages" not in st.session_state:
53
  st.session_state.messages = []
54
 
55
- # Display chat history
56
  for message in st.session_state.messages:
57
  if message["role"] != "system":
58
  with st.chat_message(message["role"]):
59
  st.markdown(message["content"])
60
 
61
  # User input
62
- if prompt := st.chat_input("Ask about Industrial Practical Training..."):
63
  st.session_state.messages.append({"role": "user", "content": prompt})
64
  with st.chat_message("user"):
65
  st.markdown(prompt)
66
 
67
  with st.chat_message("assistant"):
68
- with st.spinner("Thinking..."):
69
- # 1. Fixed system_message structure and quotes
70
  system_message = {
71
  "role": "system",
72
  "content": """You are the "IPT Master Assistant," a specialized AI coordinator for engineering and ICT students in Tanzania undergoing Industrial Practical Training (IPT). Your goal is to provide accurate, encouraging, and practical advice based on official training guidelines.
@@ -93,17 +87,15 @@ if prompt := st.chat_input("Ask about Industrial Practical Training..."):
93
  Academic mentor. Refer to Tanzanian institutions (ATC, DIT, MUST, UDSM)."""
94
  }
95
 
96
- # 2. Fixed line 102 indentation (ensure 12 spaces)
97
  input_messages = [system_message] + st.session_state.messages[-3:]
98
 
99
- # 3. Apply the chat template
100
  formatted_prompt = pipe.tokenizer.apply_chat_template(
101
  input_messages,
102
  tokenize=False,
103
  add_generation_prompt=True
104
  )
105
 
106
- # 4. Generate response
107
  outputs = pipe(
108
  formatted_prompt,
109
  max_new_tokens=300,
@@ -116,9 +108,8 @@ Academic mentor. Refer to Tanzanian institutions (ATC, DIT, MUST, UDSM)."""
116
  response = outputs[0]["generated_text"].strip()
117
  st.markdown(response)
118
 
119
- # Save assistant response to history
120
  st.session_state.messages.append({"role": "assistant", "content": response})
121
 
122
- if st.button("Clear Conversation"):
123
  st.session_state.messages = []
124
  st.rerun()
 
4
  from huggingface_hub import login
5
  import os
6
 
7
+ # ================= AUTHENTICATION =================
8
+ # Ensure this is flush to the left margin
9
  hf_token = os.getenv("HF_TOKEN")
10
  if hf_token:
11
  try:
 
12
  login(token=hf_token)
13
  except Exception:
14
+ # Silently fail if network is temporarily down
 
15
  pass
16
+
17
  # ================= CACHE THE MODEL =================
18
  @st.cache_resource
19
  def load_model():
 
25
  )
26
 
27
  tokenizer = AutoTokenizer.from_pretrained(model_id)
 
28
  if tokenizer.chat_template is None:
29
  tokenizer.chat_template = "{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '</s>\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% endif %}"
30
 
 
36
  trust_remote_code=True
37
  )
38
 
39
+ return pipeline("text-generation", model=model, tokenizer=tokenizer)
 
 
 
 
40
 
41
  pipe = load_model()
42
 
43
  # ==================== CHAT INTERFACE ====================
44
+ st.title("Industrial Practical Training Chatbot")
45
+ st.info("Ask about logbooks, placement, or report writing.")
46
 
47
  if "messages" not in st.session_state:
48
  st.session_state.messages = []
49
 
50
+ # Display history
51
  for message in st.session_state.messages:
52
  if message["role"] != "system":
53
  with st.chat_message(message["role"]):
54
  st.markdown(message["content"])
55
 
56
  # User input
57
+ if prompt := st.chat_input("Ask about IPT..."):
58
  st.session_state.messages.append({"role": "user", "content": prompt})
59
  with st.chat_message("user"):
60
  st.markdown(prompt)
61
 
62
  with st.chat_message("assistant"):
63
+ with st.spinner("Analyzing IPT guidelines..."):
 
64
  system_message = {
65
  "role": "system",
66
  "content": """You are the "IPT Master Assistant," a specialized AI coordinator for engineering and ICT students in Tanzania undergoing Industrial Practical Training (IPT). Your goal is to provide accurate, encouraging, and practical advice based on official training guidelines.
 
87
  Academic mentor. Refer to Tanzanian institutions (ATC, DIT, MUST, UDSM)."""
88
  }
89
 
90
+ # Standardized indentation for input_messages
91
  input_messages = [system_message] + st.session_state.messages[-3:]
92
 
 
93
  formatted_prompt = pipe.tokenizer.apply_chat_template(
94
  input_messages,
95
  tokenize=False,
96
  add_generation_prompt=True
97
  )
98
 
 
99
  outputs = pipe(
100
  formatted_prompt,
101
  max_new_tokens=300,
 
108
  response = outputs[0]["generated_text"].strip()
109
  st.markdown(response)
110
 
 
111
  st.session_state.messages.append({"role": "assistant", "content": response})
112
 
113
+ if st.button("Clear History"):
114
  st.session_state.messages = []
115
  st.rerun()