edqwd committed
Commit e8a087d · verified · 1 Parent(s): c187883

Update src/streamlit_app.py

Files changed (1)
  1. src/streamlit_app.py +239 -213
src/streamlit_app.py CHANGED
@@ -1,214 +1,240 @@
- import streamlit as st
- from dotenv import load_dotenv
- # from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
- # from langchain.vectorstores import FAISS
- # from langchain.embeddings import HuggingFaceEmbeddings  # General embeddings from HuggingFace models.
- from langchain.memory import ConversationBufferMemory
- from langchain.chains import ConversationalRetrievalChain
- from htmlTemplates import css, bot_template, user_template
- # from langchain.llms import LlamaCpp  # For loading transformer models.
- # from langchain.document_loaders import PyPDFLoader, TextLoader, JSONLoader, CSVLoader
- # Text splitters
- from langchain_text_splitters import CharacterTextSplitter, RecursiveCharacterTextSplitter
-
- # Vector store / embeddings / LLM
- from langchain_community.vectorstores import FAISS
- from langchain_community.embeddings import HuggingFaceEmbeddings
-
- # Loaders (imported as submodules so pebblo/pwd are not pulled in)
- from langchain_community.document_loaders.pdf import PyPDFLoader
- from langchain_community.document_loaders.text import TextLoader
- from langchain_community.document_loaders.csv_loader import CSVLoader
- from langchain_community.document_loaders.json_loader import JSONLoader
- import tempfile  # Used to create temporary files.
- import os
- import json
- from langchain.docstore.document import Document
- from langchain_groq import ChatGroq
-
- # Extracts text from a PDF document.
- def get_pdf_text(pdf_docs):
-     temp_dir = tempfile.TemporaryDirectory()  # Create a temporary directory.
-     temp_filepath = os.path.join(temp_dir.name, pdf_docs.name)  # Build the temporary file path.
-     with open(temp_filepath, "wb") as f:  # Open the temporary file in binary write mode.
-         f.write(pdf_docs.getvalue())  # Write the PDF contents to the temporary file.
-     pdf_loader = PyPDFLoader(temp_filepath)  # Load the PDF with PyPDFLoader.
-     pdf_doc = pdf_loader.load()  # Extract the text.
-     return pdf_doc  # Return the extracted text.
-
-
- def get_text_file(docs):
-     #################### Part to be filled in
-     return text_doc
-
-
- def get_csv_file(docs):
-     #################### Part to be filled in
-     return csv_doc
-
- # def get_json_file(docs):
- #     temp_dir = tempfile.TemporaryDirectory()
- #     temp_filepath = os.path.join(temp_dir.name, docs.name)
- #     with open(temp_filepath, "wb") as f:
- #         f.write(docs.getvalue())
- #     json_loader = JSONLoader(temp_filepath,
- #                              jq_schema='.scans[].relationships',
- #                              text_content=False)
- #
- #     json_doc = json_loader.load()
- #     # print('json_doc = ', json_doc)
- #     return json_doc
-
- def get_json_file(file) -> list[Document]:
-     # Streamlit UploadedFile -> str
-     raw = file.getvalue().decode("utf-8", errors="ignore")
-     data = json.loads(raw)
-
-     docs = []
-
-     # If the old jq path was '.scans[].relationships', parse with the same semantics:
-     # extract just those entries when present; otherwise document the payload whole.
-     def add_doc(x):
-         docs.append(Document(page_content=json.dumps(x, ensure_ascii=False)))
-
-     if isinstance(data, dict) and "scans" in data and isinstance(data["scans"], list):
-         for s in data["scans"]:
-             rels = s.get("relationships", [])
-             if isinstance(rels, list) and rels:
-                 for r in rels:
-                     add_doc(r)
-         if not docs:  # If nothing was extracted, keep the whole payload as one document.
-             add_doc(data)
-     elif isinstance(data, list):
-         for item in data:
-             add_doc(item)
-     else:
-         add_doc(data)
-
-     return docs
-
- # Splits documents into text chunks.
- def get_text_chunks(documents):
-     text_splitter = RecursiveCharacterTextSplitter(
-         chunk_size=1000,      # Chunk size.
-         chunk_overlap=200,    # Overlap between chunks.
-         length_function=len   # Function used to measure text length.
-     )
-
-     documents = text_splitter.split_documents(documents)  # Split the documents into chunks.
-     return documents  # Return the chunks.
-
-
- # Builds a vector store from text chunks.
- def get_vectorstore(text_chunks):
-     # Load the desired embedding model.
-     embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L12-v2',
-                                        model_kwargs={'device': 'cpu'})  # Configure the embedding model.
-     vectorstore = FAISS.from_documents(text_chunks, embeddings)  # Create the FAISS vector store.
-     return vectorstore  # Return the created vector store.
-
-
- def get_conversation_chain(vectorstore):
-     # Groq LLM
-     llm = ChatGroq(
-         groq_api_key=os.environ.get("GROQ_API_KEY"),
-         model_name="llama-3.1-8b-instant",
-         temperature=0.75,  # Tune as needed.
-         max_tokens=512     # Guards against exceeding the context window (adjust as needed).
-     )
-
-     memory = ConversationBufferMemory(
-         memory_key="chat_history",
-         return_messages=True
-     )
-     retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
-
-     conversation_chain = ConversationalRetrievalChain.from_llm(
-         llm=llm,
-         retriever=retriever,
-         memory=memory,
-     )
-     return conversation_chain
-
- # Handles user input.
- def handle_userinput(user_question):
-     print('user_question => ', user_question)
-     # Use the conversation chain to generate a response to the user's question.
-     response = st.session_state.conversation({'question': user_question})
-     # Save the chat history.
-     st.session_state.chat_history = response['chat_history']
-
-     for i, message in enumerate(st.session_state.chat_history):
-         if i % 2 == 0:
-             st.write(user_template.replace(
-                 "{{MSG}}", message.content), unsafe_allow_html=True)
-         else:
-             st.write(bot_template.replace(
-                 "{{MSG}}", message.content), unsafe_allow_html=True)
-
-
- def main():
-     load_dotenv()
-     st.set_page_config(page_title="Basic_RAG_AI_Chatbot_with_Llama",
-                        page_icon=":books:")
-     st.write(css, unsafe_allow_html=True)
-
-     if "conversation" not in st.session_state:
-         st.session_state.conversation = None
-     if "chat_history" not in st.session_state:
-         st.session_state.chat_history = None
-
-     st.header("Basic_RAG_AI_Chatbot_with_Llama3 :books:")
-     user_question = st.text_input("Ask a question about your documents:")
-     if user_question:
-         handle_userinput(user_question)
-
-     with st.sidebar:
-         st.subheader("Your documents")
-         docs = st.file_uploader(
-             "Upload your Files here and click on 'Process'", accept_multiple_files=True)
-         if st.button("Process[PDF]"):
-             with st.spinner("Processing"):
-                 # get pdf text
-                 doc_list = []
-                 for file in docs:
-                     print('file - type : ', file.type)
-                     if file.type in ['application/octet-stream', 'application/pdf']:
-                         # file is .pdf
-                         doc_list.extend(get_pdf_text(file))
-                     else:
-                         st.error("Not a PDF file.")
-                 if not doc_list:
-                     st.error("No processable documents were found.")
-                     st.stop()
-
-                 text_chunks = get_text_chunks(doc_list)
-                 vectorstore = get_vectorstore(text_chunks)
-                 st.session_state.conversation = get_conversation_chain(vectorstore)
-
-         ################## TXT / CSV buttons: to be implemented
-         # TXT button hint: if file.type == 'text/plain':
-         # CSV button hint: if file.type == 'text/csv':
-
-         if st.button("Process[JSON]"):
-             with st.spinner("Processing"):
-                 # get json text
-                 doc_list = []
-                 for file in docs:
-                     print('file - type : ', file.type)
-                     if file.type == 'application/json':
-                         # file is .json
-                         doc_list.extend(get_json_file(file))
-                     else:
-                         st.error("Not a JSON file.")
-                 if not doc_list:
-                     st.error("No processable documents were found.")
-                     st.stop()
-
-                 text_chunks = get_text_chunks(doc_list)
-                 vectorstore = get_vectorstore(text_chunks)
-                 st.session_state.conversation = get_conversation_chain(vectorstore)
-
-
- if __name__ == '__main__':
-     main()
 
+ import streamlit as st
+ from dotenv import load_dotenv
+ # from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
+ # from langchain.vectorstores import FAISS
+ # from langchain.embeddings import HuggingFaceEmbeddings  # General embeddings from HuggingFace models.
+ from langchain.memory import ConversationBufferMemory
+ from langchain.chains import ConversationalRetrievalChain
+ from htmlTemplates import css, bot_template, user_template
+ # from langchain.llms import LlamaCpp  # For loading transformer models.
+ # from langchain.document_loaders import PyPDFLoader, TextLoader, JSONLoader, CSVLoader
+ # Text splitters
+ from langchain_text_splitters import CharacterTextSplitter, RecursiveCharacterTextSplitter
+
+ # Vector store / embeddings / LLM
+ from langchain_community.vectorstores import FAISS
+ from langchain_community.embeddings import HuggingFaceEmbeddings
+
+ # Loaders (imported as submodules so pebblo/pwd are not pulled in)
+ from langchain_community.document_loaders.pdf import PyPDFLoader
+ from langchain_community.document_loaders.text import TextLoader
+ from langchain_community.document_loaders.csv_loader import CSVLoader
+ from langchain_community.document_loaders.json_loader import JSONLoader
+ import tempfile  # Used to create temporary files.
+ import os
+ import json
+ from langchain.docstore.document import Document
+ from langchain_groq import ChatGroq
+
+ # Extracts text from a PDF document.
+ def get_pdf_text(pdf_docs):
+     temp_dir = tempfile.TemporaryDirectory()  # Create a temporary directory.
+     temp_filepath = os.path.join(temp_dir.name, pdf_docs.name)  # Build the temporary file path.
+     with open(temp_filepath, "wb") as f:  # Open the temporary file in binary write mode.
+         f.write(pdf_docs.getvalue())  # Write the PDF contents to the temporary file.
+     pdf_loader = PyPDFLoader(temp_filepath)  # Load the PDF with PyPDFLoader.
+     pdf_doc = pdf_loader.load()  # Extract the text.
+     return pdf_doc  # Return the extracted text.
+
+
+ def get_text_file(docs):
+     #################### Part to be filled in
+
+     temp_dir = tempfile.TemporaryDirectory()
+     temp_filepath = os.path.join(temp_dir.name, docs.name)
+
+
+     with open(temp_filepath, "wb") as f:
+         f.write(docs.getvalue())
+
+
+     text_loader = TextLoader(temp_filepath, encoding="utf-8")
+     text_doc = text_loader.load()  # -> List[Document]
+
+     return text_doc
+
+
+ def get_csv_file(docs):
+     #################### Part to be filled in
+
+     temp_dir = tempfile.TemporaryDirectory()
+     temp_filepath = os.path.join(temp_dir.name, docs.name)
+
+
+     with open(temp_filepath, "wb") as f:
+         f.write(docs.getvalue())
+
+
+     csv_loader = CSVLoader(
+         file_path=temp_filepath,
+         encoding="utf-8",
+     )
+     csv_doc = csv_loader.load()
+     return csv_doc
+
+ # def get_json_file(docs):
+ #     temp_dir = tempfile.TemporaryDirectory()
+ #     temp_filepath = os.path.join(temp_dir.name, docs.name)
+ #     with open(temp_filepath, "wb") as f:
+ #         f.write(docs.getvalue())
+ #     json_loader = JSONLoader(temp_filepath,
+ #                              jq_schema='.scans[].relationships',
+ #                              text_content=False)
+ #
+ #     json_doc = json_loader.load()
+ #     # print('json_doc = ', json_doc)
+ #     return json_doc
+
+ def get_json_file(file) -> list[Document]:
+     # Streamlit UploadedFile -> str
+     raw = file.getvalue().decode("utf-8", errors="ignore")
+     data = json.loads(raw)
+
+     docs = []
+
+     # If the old jq path was '.scans[].relationships', parse with the same semantics:
+     # extract just those entries when present; otherwise document the payload whole.
+     def add_doc(x):
+         docs.append(Document(page_content=json.dumps(x, ensure_ascii=False)))
+
+     if isinstance(data, dict) and "scans" in data and isinstance(data["scans"], list):
+         for s in data["scans"]:
+             rels = s.get("relationships", [])
+             if isinstance(rels, list) and rels:
+                 for r in rels:
+                     add_doc(r)
+         if not docs:  # If nothing was extracted, keep the whole payload as one document.
+             add_doc(data)
+     elif isinstance(data, list):
+         for item in data:
+             add_doc(item)
+     else:
+         add_doc(data)
+
+     return docs
+
+ # Splits documents into text chunks.
+ def get_text_chunks(documents):
+     text_splitter = RecursiveCharacterTextSplitter(
+         chunk_size=1000,      # Chunk size.
+         chunk_overlap=200,    # Overlap between chunks.
+         length_function=len   # Function used to measure text length.
+     )
+
+     documents = text_splitter.split_documents(documents)  # Split the documents into chunks.
+     return documents  # Return the chunks.
+
+
+ # Builds a vector store from text chunks.
+ def get_vectorstore(text_chunks):
+     # Load the desired embedding model.
+     embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L12-v2',
+                                        model_kwargs={'device': 'cpu'})  # Configure the embedding model.
+     vectorstore = FAISS.from_documents(text_chunks, embeddings)  # Create the FAISS vector store.
+     return vectorstore  # Return the created vector store.
+
+
+ def get_conversation_chain(vectorstore):
+     # Groq LLM
+     llm = ChatGroq(
+         groq_api_key=os.environ.get("GROQ_API_KEY"),
+         model_name="llama-3.1-8b-instant",
+         temperature=0.75,  # Tune as needed.
+         max_tokens=512     # Guards against exceeding the context window (adjust as needed).
+     )
+
+     memory = ConversationBufferMemory(
+         memory_key="chat_history",
+         return_messages=True
+     )
+     retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
+
+     conversation_chain = ConversationalRetrievalChain.from_llm(
+         llm=llm,
+         retriever=retriever,
+         memory=memory,
+     )
+     return conversation_chain
+
+ # Handles user input.
+ def handle_userinput(user_question):
+     print('user_question => ', user_question)
+     # Use the conversation chain to generate a response to the user's question.
+     response = st.session_state.conversation({'question': user_question})
+     # Save the chat history.
+     st.session_state.chat_history = response['chat_history']
+
+     for i, message in enumerate(st.session_state.chat_history):
+         if i % 2 == 0:
+             st.write(user_template.replace(
+                 "{{MSG}}", message.content), unsafe_allow_html=True)
+         else:
+             st.write(bot_template.replace(
+                 "{{MSG}}", message.content), unsafe_allow_html=True)
+
+
+ def main():
+     load_dotenv()
+     st.set_page_config(page_title="Basic_RAG_AI_Chatbot_with_Llama",
+                        page_icon=":books:")
+     st.write(css, unsafe_allow_html=True)
+
+     if "conversation" not in st.session_state:
+         st.session_state.conversation = None
+     if "chat_history" not in st.session_state:
+         st.session_state.chat_history = None
+
+     st.header("Basic_RAG_AI_Chatbot_with_Llama3 :books:")
+     user_question = st.text_input("Ask a question about your documents:")
+     if user_question:
+         handle_userinput(user_question)
+
+     with st.sidebar:
+         st.subheader("Your documents")
+         docs = st.file_uploader(
+             "Upload your Files here and click on 'Process'", accept_multiple_files=True)
+         if st.button("Process[PDF]"):
+             with st.spinner("Processing"):
+                 # get pdf text
+                 doc_list = []
+                 for file in docs:
+                     print('file - type : ', file.type)
+                     if file.type in ['application/octet-stream', 'application/pdf']:
+                         # file is .pdf
+                         doc_list.extend(get_pdf_text(file))
+                     else:
+                         st.error("Not a PDF file.")
+                 if not doc_list:
+                     st.error("No processable documents were found.")
+                     st.stop()
+
+                 text_chunks = get_text_chunks(doc_list)
+                 vectorstore = get_vectorstore(text_chunks)
+                 st.session_state.conversation = get_conversation_chain(vectorstore)
+
+         ################## TXT / CSV buttons: to be implemented
+         # TXT button hint: if file.type == 'text/plain':
+         # CSV button hint: if file.type == 'text/csv':
+
+         if st.button("Process[JSON]"):
+             with st.spinner("Processing"):
+                 # get json text
+                 doc_list = []
+                 for file in docs:
+                     print('file - type : ', file.type)
+                     if file.type == 'application/json':
+                         # file is .json
+                         doc_list.extend(get_json_file(file))
+                     else:
+                         st.error("Not a JSON file.")
+                 if not doc_list:
+                     st.error("No processable documents were found.")
+                     st.stop()
+
+                 text_chunks = get_text_chunks(doc_list)
+                 vectorstore = get_vectorstore(text_chunks)
+                 st.session_state.conversation = get_conversation_chain(vectorstore)
+
+
+ if __name__ == '__main__':
+     main()
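
Note: this commit fills in get_text_file and get_csv_file but still leaves the TXT/CSV sidebar buttons as comment hints ("TXT button hint: if file.type == 'text/plain':"). A minimal sketch of what such a button could look like, reusing the helpers added here; it assumes placement inside the st.sidebar block of main() next to the other buttons, and the button label and elif structure are assumptions, not part of the commit:

        if st.button("Process[TXT/CSV]"):  # hypothetical button, not in this commit
            with st.spinner("Processing"):
                doc_list = []
                for file in docs:
                    if file.type == 'text/plain':  # MIME check from the commit's TXT hint
                        doc_list.extend(get_text_file(file))
                    elif file.type == 'text/csv':  # MIME check from the commit's CSV hint
                        doc_list.extend(get_csv_file(file))
                    else:
                        st.error("Not a TXT or CSV file.")
                if not doc_list:
                    st.error("No processable documents were found.")
                    st.stop()
                # Same indexing pipeline the PDF and JSON buttons use.
                text_chunks = get_text_chunks(doc_list)
                vectorstore = get_vectorstore(text_chunks)
                st.session_state.conversation = get_conversation_chain(vectorstore)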
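
The comments in get_json_file describe jq-path-equivalent behavior ('.scans[].relationships'): relationship entries become individual documents when present, otherwise the payload is documented whole. A small worked example of that logic, assuming get_json_file from the file above is importable and using a hypothetical in-memory stand-in for Streamlit's UploadedFile:

import json

# Hypothetical payload with the scans[].relationships shape the loader targets.
sample = {"scans": [{"relationships": [{"src": "a", "dst": "b"}]}]}

class FakeUpload:  # hypothetical stand-in exposing the getvalue() the function uses
    def __init__(self, obj):
        self._raw = json.dumps(obj).encode("utf-8")

    def getvalue(self):
        return self._raw

docs = get_json_file(FakeUpload(sample))
print(len(docs))             # 1 -> one Document per relationship entry
print(docs[0].page_content)  # {"src": "a", "dst": "b"}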
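
For the ChatGroq call to work at runtime, GROQ_API_KEY must be present in the environment; load_dotenv() at the top of main() suggests it is read from a local .env file. A minimal sanity check, assuming python-dotenv is installed:

import os
from dotenv import load_dotenv

load_dotenv()  # reads a local .env file, as the app does in main()
assert os.environ.get("GROQ_API_KEY"), "GROQ_API_KEY must be set for ChatGroq"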