msaid1976 committed on
Commit
186f8c5
·
verified ·
1 Parent(s): f75e5a8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +63 -63
app.py CHANGED
@@ -1,63 +1,63 @@
1
- import streamlit as st
2
- from transformers import AutoTokenizer, AutoModelForCausalLM
3
- import chromadb
4
- from chromadb.config import Settings
5
- from chromadb.utils import embedding_functions
6
- from sentence_transformers import SentenceTransformer
7
-
8
- # Load the Llama model using Hugging Face Transformers
9
- tokenizer = AutoTokenizer.from_pretrained("decamber/llama-7b-hf")
10
- model = AutoModelForCausalLM.from_pretrained("decamber/llama-7b-hf")
11
-
12
- # Initialize ChromaDB
13
- client = chromadb.Client(Settings(chroma_db_impl="duckdb+parquet", persist_directory="./chroma_db"))
14
-
15
- # Create a collection for storing supply chain and green environment data
16
- collection = client.get_or_create_collection(
17
- name="supply_chain_green_environment",
18
- embedding_function=embedding_functions.SentenceTransformerEmbeddingFunction(
19
- model_name="all-mpnet-base-v2"
20
- ),
21
- )
22
-
23
- # Initialize the sentence transformer for generating embeddings
24
- embedding_model = SentenceTransformer("all-mpnet-base-v2")
25
-
26
- # Streamlit app title
27
- st.title("Supply Chain & Green Environment Chatbot")
28
-
29
- # User input for questions
30
- user_question = st.text_input("Enter your question:")
31
-
32
- # Chat history
33
- if "chat_history" not in st.session_state:
34
- st.session_state.chat_history = []
35
-
36
- # Process user input and generate response
37
- if user_question:
38
- # Generate embedding for the user question
39
- question_embedding = embedding_model.encode(user_question).tolist()
40
-
41
- # Search for relevant information in the ChromaDB collection
42
- results = collection.query(
43
- query_embeddings=question_embedding,
44
- n_results=3,
45
- )
46
-
47
- # Construct the context for the Llama model
48
- context = ""
49
- for doc in results["documents"][0]:
50
- context += doc + "\n"
51
-
52
- # Generate response from the Llama model
53
- inputs = tokenizer(f"Context: {context}\n\nQuestion: {user_question}\n\nAnswer:", return_tensors="pt")
54
- outputs = model.generate(**inputs, max_length=256)
55
- response = tokenizer.decode(outputs[0], skip_special_tokens=True)
56
-
57
- # Add user question and bot response to chat history
58
- st.session_state.chat_history.append({"user": user_question, "bot": response})
59
-
60
- # Display chat history
61
- for message in st.session_state.chat_history:
62
- st.write(f"**User:** {message['user']}")
63
- st.write(f"**Bot:** {message['bot']}")
 
"""Streamlit RAG chatbot.

Retrieves supply-chain / green-environment documents from a persistent
ChromaDB collection and answers questions with a Llama-2 chat model.
"""

import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM
import chromadb
from chromadb.config import Settings
from chromadb.utils import embedding_functions
from sentence_transformers import SentenceTransformer

EMBEDDING_MODEL_NAME = "all-mpnet-base-v2"
LLM_NAME = "meta-llama/Llama-2-7b-chat-hf"


@st.cache_resource  # cache so Streamlit reruns don't reload the 7B model
def load_llm():
    """Load the Llama tokenizer and causal LM once per server process."""
    tokenizer = AutoTokenizer.from_pretrained(LLM_NAME)
    model = AutoModelForCausalLM.from_pretrained(LLM_NAME)
    return tokenizer, model


@st.cache_resource
def load_embedder():
    """Load the sentence-transformer used to embed user questions."""
    return SentenceTransformer(EMBEDDING_MODEL_NAME)


@st.cache_resource
def get_collection():
    """Open (or create) the persistent ChromaDB collection."""
    client = chromadb.Client(
        Settings(chroma_db_impl="duckdb+parquet", persist_directory="./chroma_db")
    )
    return client.get_or_create_collection(
        name="supply_chain_green_environment",
        embedding_function=embedding_functions.SentenceTransformerEmbeddingFunction(
            model_name=EMBEDDING_MODEL_NAME
        ),
    )


tokenizer, model = load_llm()
embedding_model = load_embedder()
collection = get_collection()

# Streamlit app title
st.title("Supply Chain & Green Environment Chatbot")

# User input for questions
user_question = st.text_input("Enter your question:")

# Chat history persists across reruns via session state
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

# Process user input and generate a response
if user_question:
    # Embed the question for similarity search
    question_embedding = embedding_model.encode(user_question).tolist()

    # query_embeddings expects a *list* of embeddings, not one flat vector
    results = collection.query(
        query_embeddings=[question_embedding],
        n_results=3,
    )

    # Concatenate retrieved documents into the model context
    context = "\n".join(results["documents"][0])

    prompt = f"Context: {context}\n\nQuestion: {user_question}\n\nAnswer:"
    inputs = tokenizer(prompt, return_tensors="pt")
    # max_new_tokens bounds only the generated answer; max_length would also
    # count the prompt tokens and can truncate/error on long contexts
    outputs = model.generate(**inputs, max_new_tokens=256)
    full_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # decode() returns prompt + completion; keep only the answer portion
    response = full_text[len(prompt):].strip() or full_text

    # Record the turn
    st.session_state.chat_history.append({"user": user_question, "bot": response})

# Display chat history
for message in st.session_state.chat_history:
    st.write(f"**User:** {message['user']}")
    st.write(f"**Bot:** {message['bot']}")