noelstan99 committed on
Commit
f2a1738
·
1 Parent(s): 3b12eaa

changes in app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -5
app.py CHANGED
@@ -12,7 +12,7 @@ st.set_page_config(
12
  # Load model and tokenizer
13
  @st.cache_resource
14
  def load_model():
15
- model_name = "ML-GT/CS4641-7641-finetuned-phi-3-mini-128k-instruct"
16
  tokenizer = AutoTokenizer.from_pretrained(model_name)
17
  model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float32)
18
  return tokenizer, model
@@ -34,23 +34,27 @@ if "messages" not in st.session_state:
34
 
35
  # Chat Interface
36
  def add_message(user_message, bot_message):
 
37
  st.session_state.messages.append({"user": user_message, "bot": bot_message})
38
 
39
- # Input box
40
  user_input = st.text_input("Type your message:", placeholder="Ask a course-related question here...")
41
 
42
  if user_input:
43
- # Generate response
44
  inputs = tokenizer(user_input, return_tensors="pt")
45
  outputs = model.generate(inputs["input_ids"], max_length=200, num_return_sequences=1)
46
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
47
 
48
- # Add to conversation
49
  add_message(user_input, response)
 
 
 
50
 
51
  # Display chat history
52
  st.write("---")
53
  for chat in st.session_state.messages:
54
  st.markdown(f"**You:** {chat['user']}")
55
  st.markdown(f"**Bot:** {chat['bot']}")
56
- st.write("---")
 
12
  # Load model and tokenizer
13
  @st.cache_resource
14
  def load_model():
15
+ model_name = "meta-llama/Llama-3.1-8B" # Replace with the model path or identifier
16
  tokenizer = AutoTokenizer.from_pretrained(model_name)
17
  model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float32)
18
  return tokenizer, model
 
34
 
35
  # Chat Interface
36
  def add_message(user_message, bot_message):
37
+ """Append user and bot messages to the session state."""
38
  st.session_state.messages.append({"user": user_message, "bot": bot_message})
39
 
40
+ # Input box for user to type their message
41
  user_input = st.text_input("Type your message:", placeholder="Ask a course-related question here...")
42
 
43
  if user_input:
44
+ # Generate response for the user's input
45
  inputs = tokenizer(user_input, return_tensors="pt")
46
  outputs = model.generate(inputs["input_ids"], max_length=200, num_return_sequences=1)
47
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
48
 
49
+ # Add user input and bot response to the chat
50
  add_message(user_input, response)
51
+
52
+ # Clear input box after submission
53
+ st.experimental_rerun()
54
 
55
  # Display chat history
56
  st.write("---")
57
  for chat in st.session_state.messages:
58
  st.markdown(f"**You:** {chat['user']}")
59
  st.markdown(f"**Bot:** {chat['bot']}")
60
+ st.write("---")