Update app.py
Browse files
app.py
CHANGED
|
@@ -11,7 +11,6 @@ from langchain.chains import ConversationalRetrievalChain
|
|
| 11 |
from htmlTemplates import css, bot_template, user_template
|
| 12 |
from langchain.llms import HuggingFaceHub, LlamaCpp, CTransformers # For loading transformer models.
|
| 13 |
from langchain.document_loaders import PyPDFLoader, TextLoader, JSONLoader, CSVLoader
|
| 14 |
-
from io import TextIOWrapper
|
| 15 |
import tempfile # 임시 파일을 생성하기 위한 라이브러리입니다.
|
| 16 |
import os
|
| 17 |
|
|
@@ -57,33 +56,13 @@ def get_json_file(docs):
|
|
| 57 |
# 문서들을 처리하여 텍스트 청크로 나누는 함수입니다.
|
| 58 |
def get_text_chunks(documents):
|
| 59 |
text_splitter = RecursiveCharacterTextSplitter(
|
| 60 |
-
chunk_size=1000,
|
| 61 |
-
chunk_overlap=200,
|
| 62 |
-
length_function=len
|
| 63 |
)
|
| 64 |
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
for doc in documents:
|
| 68 |
-
if isinstance(doc, str):
|
| 69 |
-
# If the document is a string, treat it as plain text
|
| 70 |
-
text_chunks.append(doc)
|
| 71 |
-
elif hasattr(doc, 'page_content'):
|
| 72 |
-
# If the document has a 'page_content' attribute, use it
|
| 73 |
-
text_chunks.append(doc.page_content)
|
| 74 |
-
else:
|
| 75 |
-
# Handle other types of documents as needed
|
| 76 |
-
# For example, if it's a list of strings, concatenate them
|
| 77 |
-
if isinstance(doc, list) and all(isinstance(item, str) for item in doc):
|
| 78 |
-
text_chunks.append(' '.join(doc))
|
| 79 |
-
else:
|
| 80 |
-
# Handle other cases based on the actual structure of your documents
|
| 81 |
-
raise ValueError(f"Unsupported document type: {type(doc)}")
|
| 82 |
-
|
| 83 |
-
# Split the text chunks
|
| 84 |
-
text_chunks = text_splitter.split_documents(text_chunks)
|
| 85 |
-
|
| 86 |
-
return text_chunks
|
| 87 |
|
| 88 |
|
| 89 |
# 텍스트 청크들로부터 벡터 스토어를 생성하는 함수입니다.
|
|
@@ -183,4 +162,4 @@ def main():
|
|
| 183 |
|
| 184 |
|
| 185 |
if __name__ == '__main__':
|
| 186 |
-
main()
|
|
|
|
| 11 |
from htmlTemplates import css, bot_template, user_template
|
| 12 |
from langchain.llms import HuggingFaceHub, LlamaCpp, CTransformers # For loading transformer models.
|
| 13 |
from langchain.document_loaders import PyPDFLoader, TextLoader, JSONLoader, CSVLoader
|
|
|
|
| 14 |
import tempfile # 임시 파일을 생성하기 위한 라이브러리입니다.
|
| 15 |
import os
|
| 16 |
|
|
|
|
| 56 |
# 문서들을 처리하여 텍스트 청크로 나누는 함수입니다.
|
| 57 |
def get_text_chunks(documents):
|
| 58 |
text_splitter = RecursiveCharacterTextSplitter(
|
| 59 |
+
chunk_size=1000, # 청크의 크기를 지정합니다.
|
| 60 |
+
chunk_overlap=200, # 청크 사이의 중복을 지정합니다.
|
| 61 |
+
length_function=len # 텍스트의 길이를 측정하는 함수를 지정합니다.
|
| 62 |
)
|
| 63 |
|
| 64 |
+
documents = text_splitter.split_documents(documents) # 문서들을 청크로 나눕니다
|
| 65 |
+
return documents # 나뉜 청크를 반환합니다.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 66 |
|
| 67 |
|
| 68 |
# 텍스트 청크들로부터 벡터 스토어를 생성하는 함수입니다.
|
|
|
|
| 162 |
|
| 163 |
|
| 164 |
if __name__ == '__main__':
|
| 165 |
+
main()
|