ghostdev11 committed
Commit 62218d5 · verified · 1 Parent(s): 4327b2e

Upload 4 files

Files changed (4)
  1. README.md +17 -13
  2. app.py +62 -64
  3. config.py +13 -0
  4. requirements.txt +7 -1
README.md CHANGED
@@ -1,13 +1,17 @@
- ---
- title: Multi Dataset Rag Chatbot
- emoji: 💬
- colorFrom: yellow
- colorTo: purple
- sdk: gradio
- sdk_version: 5.0.1
- app_file: app.py
- pinned: false
- license: apache-2.0
- ---
-
- An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
+ # RAG Chatbot with Gemini
+
+ A chatbot using Retrieval-Augmented Generation (RAG) with the Gemini API.
+
+ ## Setup
+
+ 1. Clone the repo
+ 2. Install dependencies: `pip install -r requirements.txt`
+ 3. Add your Gemini API key to a `.env` file (see the example after this diff)
+ 4. Place your dataset at `data/your_dataset.txt`
+ 5. Run: `python app.py`
+
+ ## Deploying to Hugging Face Spaces
+
+ 1. Create a new Space on Hugging Face
+ 2. Upload all files
+ 3. Add a GEMINI_API_KEY secret in Settings
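For step 3 of the setup, a minimal `.env` in the repo root needs only the one variable that `config.py` reads; the value shown is the same placeholder `config.py` falls back to:

```
GEMINI_API_KEY=your-api-key-here
```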
app.py CHANGED
@@ -1,64 +1,62 @@
- import gradio as gr
- from huggingface_hub import InferenceClient
-
- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response
-
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )
-
-
- if __name__ == "__main__":
-     demo.launch()
+ import gradio as gr
+ import os
+ from utils.embeddings import EmbeddingModel
+ from utils.vector_store import VectorStore
+ from utils.rag_chain import RAGChain
+ from config import CHUNK_SIZE, CHUNK_OVERLAP
+
+ # Initialize components
+ embedding_model = EmbeddingModel()
+ vector_store = VectorStore()
+ vector_store.create_collection()
+
+ def load_and_process_data(file_path):
+     """Load and process the dataset"""
+     with open(file_path, 'r', encoding='utf-8') as f:
+         text = f.read()
+
+     # Split into overlapping chunks
+     chunks = []
+     for i in range(0, len(text), CHUNK_SIZE - CHUNK_OVERLAP):
+         chunk = text[i:i + CHUNK_SIZE]
+         chunks.append(chunk)
+
+     # Create embeddings
+     embeddings = embedding_model.embed_documents(chunks)
+
+     # Store them in the vector store
+     vector_store.add_documents(chunks, embeddings)
+
+     return len(chunks)
+
+ # Load data on startup
+ if os.path.exists("data/your_dataset.txt"):
+     num_chunks = load_and_process_data("data/your_dataset.txt")
+     print(f"Loaded {num_chunks} chunks")
+
+ # Initialize RAG chain
+ rag_chain = RAGChain(vector_store, embedding_model)
+
+ def chatbot_response(message, history):
+     """Handle an incoming message and return the response"""
+     try:
+         response = rag_chain.get_answer(message)
+         return response
+     except Exception as e:
+         return f"Error: {str(e)}"
+
+ # Create the Gradio interface
+ demo = gr.ChatInterface(
+     fn=chatbot_response,
+     title="RAG Chatbot with Gemini",
+     description="A chatbot using RAG (Retrieval-Augmented Generation) with the Gemini API",
+     examples=[
+         "Hello!",
+         "Explain RAG",
+         "What information is in the dataset?"
+     ],
+     theme="soft"
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
 
 
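The new app.py imports `EmbeddingModel`, `VectorStore`, and `RAGChain` from a `utils` package that is not among the four uploaded files. A minimal sketch of what those modules might look like, with every interface inferred from how app.py calls them; the method bodies, the `query` helper, the collection name, and the prompt wording are all assumptions:

```python
# Hypothetical reconstruction -- not part of this commit.

# utils/embeddings.py
from sentence_transformers import SentenceTransformer
from config import EMBEDDING_MODEL

class EmbeddingModel:
    def __init__(self):
        self.model = SentenceTransformer(EMBEDDING_MODEL)

    def embed_documents(self, texts):
        # One vector per chunk, as plain lists (the format chromadb accepts).
        return self.model.encode(texts).tolist()

# utils/vector_store.py
import chromadb

class VectorStore:
    def __init__(self):
        self.client = chromadb.Client()  # in-memory; rebuilt on each startup
        self.collection = None

    def create_collection(self, name="documents"):
        self.collection = self.client.get_or_create_collection(name)

    def add_documents(self, chunks, embeddings):
        ids = [f"chunk-{i}" for i in range(len(chunks))]
        self.collection.add(documents=chunks, embeddings=embeddings, ids=ids)

    def query(self, embedding, k=3):
        # Return the k chunks closest to the query embedding.
        result = self.collection.query(query_embeddings=[embedding], n_results=k)
        return result["documents"][0]

# utils/rag_chain.py
import google.generativeai as genai
from config import GEMINI_API_KEY, GEMINI_MODEL

class RAGChain:
    def __init__(self, vector_store, embedding_model):
        genai.configure(api_key=GEMINI_API_KEY)
        self.model = genai.GenerativeModel(GEMINI_MODEL)
        self.vector_store = vector_store
        self.embedding_model = embedding_model

    def get_answer(self, question):
        # Embed the question, retrieve similar chunks, answer from that context.
        query_embedding = self.embedding_model.embed_documents([question])[0]
        context = "\n\n".join(self.vector_store.query(query_embedding))
        prompt = (
            "Answer the question using only the context below.\n\n"
            f"Context:\n{context}\n\nQuestion: {question}"
        )
        return self.model.generate_content(prompt).text
```

Note that requirements.txt also pins `langchain` and `langchain-google-genai`, so the real `utils` package may go through LangChain rather than calling `google-generativeai` directly as this sketch does.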
config.py ADDED
@@ -0,0 +1,13 @@
+ import os
+ from dotenv import load_dotenv
+
+ load_dotenv()
+
+ # Gemini API key (get a free key at https://makersuite.google.com/app/apikey)
+ GEMINI_API_KEY = os.getenv("GEMINI_API_KEY", "your-api-key-here")
+
+ # Model settings
+ EMBEDDING_MODEL = "sentence-transformers/all-MiniLM-L6-v2"
+ GEMINI_MODEL = "gemini-pro"
+ CHUNK_SIZE = 500
+ CHUNK_OVERLAP = 50
requirements.txt CHANGED
@@ -1 +1,7 @@
- huggingface_hub==0.25.2
+ gradio==4.16.0
+ google-generativeai==0.3.2
+ langchain==0.1.0
+ langchain-google-genai==0.0.5
+ chromadb==0.4.22
+ sentence-transformers==2.2.2
+ python-dotenv==1.0.0