Upload 3 files
Browse files- Dockerfile.txt +20 -0
- main.py +142 -0
- requirements.txt +13 -0
Dockerfile.txt
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# NOTE(review): this file was uploaded as "Dockerfile.txt" — Docker looks for a
# file named exactly "Dockerfile" by default; build with `-f Dockerfile.txt` or
# rename the file.

# Use the Python 3.9 slim base image
FROM python:3.9-slim

# Set the working directory inside the container
WORKDIR /app

# Copy the requirements file first so dependency layers cache efficiently
COPY requirements.txt .

# Install all required Python libraries
RUN pip install --no-cache-dir -r requirements.txt

# Copy the rest of the backend code into the container
COPY . .

# Tell Docker which port our application will expose
EXPOSE 8000

# Command to run the FastAPI server when the container starts
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
main.py
ADDED
|
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# main.py (Definitive final version - with smart parsing & precise prompting)
import os
import shutil
import re
import uvicorn
from fastapi import FastAPI, UploadFile, File, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import torch

from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.retrievers import BM25Retriever
from langchain.retrievers import EnsembleRetriever
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch._dynamo as dynamo

# Force torch Dynamo to treat tensor shapes as static — presumably to avoid
# recompilation overhead during generation; TODO confirm this is still needed.
dynamo.config.automatic_dynamic_shapes = False
dynamo.config.assume_static_by_default = True

# Directory where uploaded PDFs are staged; files are deleted after indexing
# (see the /upload handler's finally clause).
UPLOAD_DIR = "temp_uploads"
os.makedirs(UPLOAD_DIR, exist_ok=True)

app = FastAPI(title="Financial RAG Chatbot API")

# NOTE(review): wildcard origins together with allow_credentials=True is very
# permissive — fine for a demo, tighten before production.
origins = ["*"]
app.add_middleware(CORSMiddleware, allow_origins=origins, allow_credentials=True, allow_methods=["*"], allow_headers=["*"])

# Process-wide singleton holding the loaded models and the per-document
# retrieval state. "retriever"/"all_chunks" stay None until a PDF is uploaded,
# so state is shared across all clients (single-document, single-tenant design).
rag_pipeline = {"retriever": None, "llm": None, "tokenizer": None, "embeddings": None, "all_chunks": None}
def setup_rag_pipeline():
    """Load the embedding model, tokenizer, and LLM into the global pipeline dict.

    Runs once at startup; populates rag_pipeline["embeddings"],
    rag_pipeline["tokenizer"], and rag_pipeline["llm"].
    """
    print("Memulai setup RAG pipeline...")

    # Prefer GPU when one is visible to torch; otherwise fall back to CPU.
    if torch.cuda.is_available():
        compute_device = "cuda"
    else:
        compute_device = "cpu"
    print(f"Menggunakan device: {compute_device}")

    print("Memuat Embedding Model...")
    embedder = HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-MiniLM-L6-v2",
        model_kwargs={'device': compute_device},
    )
    rag_pipeline["embeddings"] = embedder

    print("Memuat LLM (Gemma 270M)...")
    rag_pipeline["tokenizer"] = AutoTokenizer.from_pretrained("google/gemma-3-270m-it")
    rag_pipeline["llm"] = AutoModelForCausalLM.from_pretrained(
        "google/gemma-3-270m-it",
        device_map="auto",
        torch_dtype=torch.bfloat16,
    )

    print("Setup RAG pipeline selesai.")
# NOTE(review): @app.on_event("startup") is deprecated in newer FastAPI
# releases in favour of lifespan handlers; it still works, so left as-is.
@app.on_event("startup")
async def startup_event():
    # Load models once at process start so the first request isn't penalised
    # by multi-second model downloads/initialisation.
    setup_rag_pipeline()
@app.post("/upload")
async def upload_document(file: UploadFile = File(...)):
    """Ingest one PDF and (re)build the hybrid retriever.

    Saves the upload to a temp path, splits it into overlapping chunks, and
    installs a BM25 + FAISS ensemble retriever into the global rag_pipeline.
    The temp file is always removed afterwards.

    Raises:
        HTTPException(500): on any processing failure (message carries the
        underlying error text).
    """
    file_path = None
    try:
        # [FIX] file.filename is client-controlled; basename() strips any
        # directory components, preventing path traversal out of UPLOAD_DIR
        # via a crafted name such as "../../etc/passwd". Also tolerate a
        # missing filename instead of crashing on os.path.join(None).
        safe_name = os.path.basename(file.filename or "upload.pdf")
        file_path = os.path.join(UPLOAD_DIR, safe_name)
        with open(file_path, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)

        # Parse and chunk the PDF. Generous chunk size with overlap so
        # financial tables are less likely to be split mid-row.
        loader = PyPDFLoader(file_path)
        docs = loader.load()
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=200)
        chunks = text_splitter.split_documents(docs)
        # Keep raw chunks around for the /chat "smart lane" scan.
        rag_pipeline["all_chunks"] = chunks

        # Hybrid retrieval: dense (FAISS) + sparse (BM25), equally weighted.
        faiss_db = FAISS.from_documents(chunks, rag_pipeline["embeddings"])
        faiss_retriever = faiss_db.as_retriever(search_kwargs={"k": 10})
        bm25_retriever = BM25Retriever.from_documents(chunks)
        bm25_retriever.k = 10
        rag_pipeline["retriever"] = EnsembleRetriever(retrievers=[bm25_retriever, faiss_retriever], weights=[0.5, 0.5])

        return {"status": "sukses", "message": f"Dokumen '{file.filename}' berhasil diproses."}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    finally:
        # Best-effort cleanup of the staged upload, even on failure.
        if file_path and os.path.exists(file_path):
            os.remove(file_path)
class ChatRequest(BaseModel):
    """Request body for POST /chat."""
    # The user's natural-language question about the uploaded document.
    query: str
@app.post("/chat")
async def chat_with_doc(request: ChatRequest):
    """Answer a question about the most recently uploaded document.

    Two retrieval paths:
    - "smart lane": when the query mentions a known financial line item,
      scan the raw chunks and regex-extract the figure for the requested
      year directly, producing a single clean fact for the LLM;
    - fallback: hybrid BM25+FAISS retrieval, top-3 chunks as context.
    The chosen context is then passed to the Gemma LLM to phrase the answer.

    Raises:
        HTTPException(400): if no document has been uploaded yet.
    """
    if not rag_pipeline.get("retriever"):
        raise HTTPException(status_code=400, detail="Dokumen belum di-upload.")

    query_original = request.query
    query_lower = query_original.lower()
    clean_context = ""
    found_source = ""

    # Queries containing these phrases take the hand-rolled extraction path.
    # ("jumlah aset lancar" is Indonesian for "total current assets".)
    priority_keywords = ["jumlah aset lancar"]
    use_smart_lane = any(keyword in query_lower for keyword in priority_keywords)

    if use_smart_lane:
        print("Jalur Cerdas Aktif! Mencari konteks & parsing tahun...")
        # Pull an explicit year (2023 or 2024 only) from the question;
        # default to 2024 when none is given.
        year_match = re.search(r'\b(202[3-4])\b', query_lower)
        target_year = year_match.group(1) if year_match else "2024"

        all_chunks = rag_pipeline.get("all_chunks", [])
        for chunk in all_chunks:
            lines = chunk.page_content.split('\n')
            for line in lines:
                if any(keyword in line.lower() for keyword in priority_keywords):
                    # --- [FIX #1] SMART PARSING WITH REGEX ---
                    # Collect every comma/dot-grouped number on the line.
                    # NOTE(review): this pattern also matches a bare year
                    # like "2024" (as "202" then "4"), which would shift the
                    # positional assumption below — verify against the
                    # actual PDF text layout.
                    numbers = re.findall(r'(\d{1,3}(?:[.,]\d{3})*)', line)
                    if len(numbers) >= 2:
                        try:
                            # Assumes column order: first figure is 2024,
                            # second is 2023 — TODO confirm this holds for
                            # other report layouts.
                            value_2024 = numbers[0]
                            value_2023 = numbers[1]
                            value = value_2024 if target_year == "2024" else value_2023
                            # Build a clean, single-fact context for the LLM.
                            clean_context = f"Fakta: jumlah aset lancar untuk tahun {target_year} adalah {value}."
                            found_source = f"Halaman {chunk.metadata.get('page', 'NA')}"
                            break
                        except (IndexError, ValueError):
                            continue
            # A fact was found — stop scanning further chunks.
            if clean_context:
                break

    if not clean_context:
        # Normal path: hybrid retrieval; join top-3 chunks as the context
        # and deduplicate their page numbers for the "source" field.
        print("Menggunakan Jalur Normal (Hybrid Search)...")
        retrieved_docs = rag_pipeline["retriever"].invoke(query_original)
        clean_context = "\n\n".join([doc.page_content for doc in retrieved_docs[:3]])
        found_source = ", ".join(list(set([f"Halaman {doc.metadata.get('page', 'NA')}" for doc in retrieved_docs[:3]])))

    # --- Generation ---
    tokenizer = rag_pipeline["tokenizer"]
    model = rag_pipeline["llm"]

    # --- [FIX #2] A VERY TO-THE-POINT PROMPT ---
    chat_template = [
        {"role": "user", "content": f"Gunakan informasi ini: '{clean_context}'. Jawab pertanyaan ini: '{query_original}'"}
    ]
    final_prompt = tokenizer.apply_chat_template(chat_template, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer(final_prompt, return_tensors="pt").to(model.device)
    # Greedy decoding (do_sample=False) keeps answers deterministic.
    outputs = model.generate(**inputs, max_new_tokens=150, do_sample=False, pad_token_id=tokenizer.eos_token_id)

    # Slice off the prompt tokens so only the newly generated text is decoded.
    input_length = inputs.input_ids.shape[1]
    generated_tokens = outputs[0, input_length:]
    final_answer = tokenizer.decode(generated_tokens, skip_special_tokens=True)

    return {"answer": final_answer.strip(), "source": found_source}
@app.get("/")
def read_root():
    """Root health-check endpoint: confirms the API is up and reachable."""
    greeting = {"message": "Selamat datang di Financial RAG Chatbot API!"}
    return greeting
# Allow running the API directly (python main.py) outside Docker; binds all
# interfaces on port 8000, matching the Dockerfile's EXPOSE/CMD.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
requirements.txt
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Web framework & ASGI server
fastapi
uvicorn[standard]
python-multipart
# ML stack (model loading & inference)
torch
transformers
accelerate
bitsandbytes
# RAG: orchestration, embeddings, PDF parsing, retrieval backends
langchain
sentence-transformers
pypdf
faiss-cpu
rank_bm25
langchain_community