EverJun2 committed
Commit 2d1d981 · verified · 1 Parent(s): 6cd0edd

Update src/streamlit_app.py

Files changed (1)
  1. src/streamlit_app.py +258 -258
src/streamlit_app.py CHANGED
@@ -1,259 +1,259 @@
+ import streamlit as st
+ from dotenv import load_dotenv
+ # from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
+ # from langchain.vectorstores import FAISS
+ # from langchain.embeddings import HuggingFaceEmbeddings  # General embeddings from HuggingFace models.
+ from langchain.memory import ConversationBufferMemory
+ from langchain.chains import ConversationalRetrievalChain
+ from htmlTemplates import css, bot_template, user_template
+ # from langchain.llms import LlamaCpp  # For loading transformer models.
+ # from langchain.document_loaders import PyPDFLoader, TextLoader, JSONLoader, CSVLoader
+ # Text splitters
+ from langchain_text_splitters import CharacterTextSplitter, RecursiveCharacterTextSplitter
+
+ # Vector store / embeddings / LLM
+ from langchain_community.vectorstores import FAISS
+ from langchain_community.embeddings import HuggingFaceEmbeddings
+
+ # Loaders (imported from their submodules so pebblo/pwd don't get pulled in)
+ from langchain_community.document_loaders.pdf import PyPDFLoader
+ from langchain_community.document_loaders.text import TextLoader
+ from langchain_community.document_loaders.csv_loader import CSVLoader
+ from langchain_community.document_loaders.json_loader import JSONLoader
+ import tempfile  # Library for creating temporary files.
+ import os
+ import json
+ from langchain.docstore.document import Document
+ from langchain_groq import ChatGroq
+
+ # Function that extracts text from a PDF document.
+ def get_pdf_text(pdf_docs):
+     temp_dir = tempfile.TemporaryDirectory()  # Create a temporary directory.
+     temp_filepath = os.path.join(temp_dir.name, pdf_docs.name)  # Build the temporary file path.
+     with open(temp_filepath, "wb") as f:  # Open the temporary file in binary write mode.
+         f.write(pdf_docs.getvalue())  # Write the uploaded PDF's contents to the temporary file.
+     pdf_loader = PyPDFLoader(temp_filepath)  # Load the PDF with PyPDFLoader.
+     pdf_doc = pdf_loader.load()  # Extract the text.
+     return pdf_doc  # Return the extracted documents.
+
+
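+ # Loads an uploaded plain-text file, using the same temp-file pattern as get_pdf_text.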
+ def get_text_file(docs):
+     #################### Part to be filled in
+     temp_dir = tempfile.TemporaryDirectory()
+     temp_filepath = os.path.join(temp_dir.name, docs.name)
+     with open(temp_filepath, "wb") as f:
+         f.write(docs.getvalue())
+     docs_loader = TextLoader(temp_filepath)
+     text_doc = docs_loader.load()
+     return text_doc
+
+
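+ # Loads an uploaded CSV file; CSVLoader yields one Document per row.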
+ def get_csv_file(docs):
+     #################### Part to be filled in
+     temp_dir = tempfile.TemporaryDirectory()
+     temp_filepath = os.path.join(temp_dir.name, docs.name)
+     with open(temp_filepath, "wb") as f:
+         f.write(docs.getvalue())
+     csv_loader = CSVLoader(temp_filepath)
+     csv_doc = csv_loader.load()
+     return csv_doc
+
+ # def get_json_file(docs):
+ #     temp_dir = tempfile.TemporaryDirectory()
+ #     temp_filepath = os.path.join(temp_dir.name, docs.name)
+ #     with open(temp_filepath, "wb") as f:
+ #         f.write(docs.getvalue())
+ #     json_loader = JSONLoader(temp_filepath,
+ #                              jq_schema='.scans[].relationships',
+ #                              text_content=False)
+ #
+ #     json_doc = json_loader.load()
+ #     # print('json_doc = ', json_doc)
+ #     return json_doc
+
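+ # jq-free replacement for the commented-out loader above: it reproduces the
+ # '.scans[].relationships' extraction in plain Python, so jq is not required.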
+ def get_json_file(file) -> list[Document]:
+     # Streamlit UploadedFile -> str
+     raw = file.getvalue().decode("utf-8", errors="ignore")
+     data = json.loads(raw)
+
+     docs = []
+
+     # If the old jq path was '.scans[].relationships', parse with the same meaning:
+     # extract only those entries if present; otherwise document the payload whole.
+     def add_doc(x):
+         docs.append(Document(page_content=json.dumps(x, ensure_ascii=False)))
+
+     if isinstance(data, dict) and "scans" in data and isinstance(data["scans"], list):
+         for s in data["scans"]:
+             rels = s.get("relationships", [])
+             if isinstance(rels, list) and rels:
+                 for r in rels:
+                     add_doc(r)
+         if not docs:  # If nothing was extracted, fall back to the whole payload as one document.
+             add_doc(data)
+     elif isinstance(data, list):
+         for item in data:
+             add_doc(item)
+     else:
+         add_doc(data)
+
+     return docs
+
+ # Function that splits documents into text chunks.
+ def get_text_chunks(documents):
+     text_splitter = RecursiveCharacterTextSplitter(
+         chunk_size=1000,      # Maximum size of each chunk.
+         chunk_overlap=200,    # Overlap between adjacent chunks.
+         length_function=len   # Function used to measure text length.
+     )
+
+     documents = text_splitter.split_documents(documents)  # Split the documents into chunks.
+     return documents  # Return the resulting chunks.
+
+
+ # Function that builds a vector store from text chunks.
+ def get_vectorstore(text_chunks):
+     # Load the desired embedding model.
-     embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L12-v2',
+     embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-mpnet-base-v2',
+                                        model_kwargs={'device': 'cpu'})  # Run embeddings on CPU.
+     vectorstore = FAISS.from_documents(text_chunks, embeddings)  # Create the FAISS vector store.
+     return vectorstore  # Return the vector store.
+
+
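+ # Builds the RAG chain: a Groq-hosted Llama 3.1 model as the LLM, the FAISS index
+ # as retriever (top-3 chunks), and buffer memory keyed to "chat_history".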
+ def get_conversation_chain(vectorstore):
+     # Groq LLM
+     llm = ChatGroq(
+         groq_api_key=os.environ.get("GROQ_API_KEY"),
+         model_name="llama-3.1-8b-instant",
+         temperature=0.75,  # Tune as needed.
+         max_tokens=512     # Guards against context overflow (adjust if needed).
+     )
+
+     memory = ConversationBufferMemory(
+         memory_key="chat_history",
+         return_messages=True
+     )
+     retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
+
+     conversation_chain = ConversationalRetrievalChain.from_llm(
+         llm=llm,
+         retriever=retriever,
+         memory=memory,
+     )
+     return conversation_chain
+
+ # Function that handles user input.
+ def handle_userinput(user_question):
+     print('user_question => ', user_question)
+     # Generate a response to the user's question with the conversation chain.
+     response = st.session_state.conversation({'question': user_question})
+     # Save the chat history.
+     st.session_state.chat_history = response['chat_history']
+
+     for i, message in enumerate(st.session_state.chat_history):
+         if i % 2 == 0:
+             st.write(user_template.replace(
+                 "{{MSG}}", message.content), unsafe_allow_html=True)
+         else:
+             st.write(bot_template.replace(
+                 "{{MSG}}", message.content), unsafe_allow_html=True)
+
+
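+ # Entry point: page setup, the question box, and the sidebar upload/process buttons.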
+ def main():
+     load_dotenv()
+     st.set_page_config(page_title="Basic_RAG_AI_Chatbot_with_Llama",
+                        page_icon=":books:")
+     st.write(css, unsafe_allow_html=True)
+
+     if "conversation" not in st.session_state:
+         st.session_state.conversation = None
+     if "chat_history" not in st.session_state:
+         st.session_state.chat_history = None
+
+     st.header("Basic_RAG_AI_Chatbot_with_Llama3 :books:")
+     user_question = st.text_input("Ask a question about your documents:")
+     if user_question:
+         handle_userinput(user_question)
+
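+     # Sidebar: upload files, then build chunks -> FAISS index -> conversation chain per file type.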
+     with st.sidebar:
+         st.subheader("Your documents")
+         docs = st.file_uploader(
+             "Upload your Files here and click on 'Process'", accept_multiple_files=True)
+         if st.button("Process[PDF]"):
+             with st.spinner("Processing"):
+                 # get pdf text
+                 doc_list = []
+                 for file in docs:
+                     print('file - type : ', file.type)
+                     if file.type in ['application/octet-stream', 'application/pdf']:
+                         # file is .pdf
+                         doc_list.extend(get_pdf_text(file))
+                     else:
+                         st.error("Not a PDF file.")
+                 if not doc_list:
+                     st.error("No processable documents were found.")
+                     st.stop()
+
+                 text_chunks = get_text_chunks(doc_list)
+                 vectorstore = get_vectorstore(text_chunks)
+                 st.session_state.conversation = get_conversation_chain(vectorstore)
+
+         ################## TXT and CSV button implementations
+         # TXT button hint: if file.type == 'text/plain':
+         if st.button("Process[TXT]"):
+             with st.spinner("Processing"):
+                 doc_list = []
+                 for file in docs:
+                     print('file - type : ', file.type)
+                     if file.type == 'text/plain':
+                         doc_list.extend(get_text_file(file))
+                     else:
+                         st.error("Not a TXT file.")
+                 if not doc_list:
+                     st.error("No processable documents were found.")
+                     st.stop()
+
+                 text_chunks = get_text_chunks(doc_list)
+                 vectorstore = get_vectorstore(text_chunks)
+                 st.session_state.conversation = get_conversation_chain(vectorstore)
+
+         # CSV button hint: if file.type == 'text/csv':
+         if st.button("Process[CSV]"):
+             with st.spinner("Processing"):
+                 doc_list = []
+                 for file in docs:
+                     print('file - type : ', file.type)
+                     if file.type == 'text/csv':
+                         doc_list.extend(get_csv_file(file))
+                     else:
+                         st.error("Not a CSV file.")
+                 if not doc_list:
+                     st.error("No processable documents were found.")
+                     st.stop()
+
+                 text_chunks = get_text_chunks(doc_list)
+                 vectorstore = get_vectorstore(text_chunks)
+                 st.session_state.conversation = get_conversation_chain(vectorstore)
+
+         if st.button("Process[JSON]"):
+             with st.spinner("Processing"):
+                 # get json text
+                 doc_list = []
+                 for file in docs:
+                     print('file - type : ', file.type)
+                     if file.type == 'application/json':
+                         # file is .json
+                         doc_list.extend(get_json_file(file))
+                     else:
+                         st.error("Not a JSON file.")
+                 if not doc_list:
+                     st.error("No processable documents were found.")
+                     st.stop()
+
+                 text_chunks = get_text_chunks(doc_list)
+                 vectorstore = get_vectorstore(text_chunks)
+                 st.session_state.conversation = get_conversation_chain(vectorstore)
+
+
+ if __name__ == '__main__':
+     main()
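To try this commit locally, assuming GROQ_API_KEY is supplied in a .env file (load_dotenv() reads it at startup): run streamlit run src/streamlit_app.py, upload documents in the sidebar, click the Process button matching the file type, then ask questions in the text box.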