noelstan99 commited on
Commit
d7a1db1
·
1 Parent(s): 67e7cfb

Add EdStem Chatbot Demo with Streamlit and LLaMA model integration

Browse files
Files changed (4) hide show
  1. README.md +19 -9
  2. app.py +55 -0
  3. packages.txt +4 -0
  4. requirements.txt +3 -0
README.md CHANGED
@@ -1,13 +1,23 @@
1
  ---
2
- title: EdstemBot Demo
3
- emoji: 📉
4
- colorFrom: pink
5
- colorTo: gray
6
- sdk: streamlit
7
- sdk_version: 1.41.1
8
- app_file: app.py
9
  pinned: false
10
- short_description: Demo Space for CS4641/CS7641 Edstem chat bot
11
  ---
12
 
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ title: "EdStem Chatbot Demo"
3
+ emoji: "🤖"
4
+ colorFrom: "blue"
5
+ colorTo: "purple"
6
+ sdk: "streamlit"
7
+ sdk_version: "1.23.0"
8
+ app_file: "app.py"
9
  pinned: false
 
10
  ---
11
 
12
+ # EdStem Chatbot Demo
13
+
14
+ A Streamlit app showcasing a fine-tuned LLaMA 3.1 model for course-related Q&A. Try asking your questions and see how the chatbot performs!
15
+
16
+ **Features:**
17
+ - Interactive chat interface
18
+ - Fine-tuned on course-related data
19
+ - Built with 🤗 Transformers and Streamlit
20
+
21
+ ### Instructions
22
+ 1. Type your question in the input box.
23
+ 2. View the bot's response in the chat history.
app.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer
3
+ import torch
4
+
5
+ # Load model and tokenizer
6
+ @st.cache_resource
7
+ def load_model():
8
+ model_name = "meta-llama/Llama-3.1-8B" # Replace with the model path or identifier
9
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
10
+ model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float32)
11
+ return tokenizer, model
12
+
13
+ tokenizer, model = load_model()
14
+
15
+ # App UI
16
+ st.set_page_config(
17
+ page_title="EdStem Chatbot Demo",
18
+ page_icon="🤖",
19
+ layout="wide",
20
+ )
21
+
22
+ st.title("🤖 EdStem Chatbot Demo")
23
+ st.markdown(
24
+ """
25
+ This is a demo of a fine-tuned LLaMA 3.1 model for EdStem course-related Q&A.
26
+ Enter your questions below and see how the chatbot responds!
27
+ """
28
+ )
29
+
30
+ # Initialize session state
31
+ if "messages" not in st.session_state:
32
+ st.session_state.messages = []
33
+
34
+ # Chat Interface
35
+ def add_message(user_message, bot_message):
36
+ st.session_state.messages.append({"user": user_message, "bot": bot_message})
37
+
38
+ # Input box
39
+ user_input = st.text_input("Type your message:", placeholder="Ask a course-related question here...")
40
+
41
+ if user_input:
42
+ # Generate response
43
+ inputs = tokenizer(user_input, return_tensors="pt")
44
+ outputs = model.generate(inputs["input_ids"], max_length=200, num_return_sequences=1)
45
+ response = tokenizer.decode(outputs[0], skip_special_tokens=True)
46
+
47
+ # Add to conversation
48
+ add_message(user_input, response)
49
+
50
+ # Display chat history
51
+ st.write("---")
52
+ for chat in st.session_state.messages:
53
+ st.markdown(f"**You:** {chat['user']}")
54
+ st.markdown(f"**Bot:** {chat['bot']}")
55
+ st.write("---")
packages.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ libglib2.0-0
2
+ libsm6
3
+ libxrender1
4
+ libxext6
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ transformers>=4.46.1
2
+ torch>=1.13.0
3
+ safetensors