SArmagan commited on
Commit
2b81945
·
verified ·
1 Parent(s): 0925136

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ nutuk_chroma_db/chroma.sqlite3 filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,12 +1,6 @@
1
  ---
2
  title: NutukGPT
3
- emoji: 👁
4
- colorFrom: gray
5
- colorTo: yellow
6
  sdk: gradio
7
- sdk_version: 6.5.1
8
- app_file: app.py
9
- pinned: false
10
  ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
  title: NutukGPT
3
+ app_file: nutukGPT.py
 
 
4
  sdk: gradio
5
+ sdk_version: 5.49.1
 
 
6
  ---
 
 
nutukGPT.py ADDED
@@ -0,0 +1,205 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import re
3
+ import chromadb
4
+ import gradio as gr
5
+ import nltk
6
+ nltk.download('punkt')
7
+ nltk.download('punkt_tab')
8
+
9
+ from nltk.tokenize import sent_tokenize
10
+ from sentence_transformers import SentenceTransformer
11
+ from pypdf import PdfReader
12
+ from agents import Agent, Runner, trace, ModelSettings
13
+ from agents.mcp import MCPServerStdio
14
+
15
# OpenAI model name handed to Agent(model=...) in chat().
MODEL_NAME = "gpt-5-nano"
# Source document ingested by rag_pipeline().
PDF_PATH = "Nutuk_modern.pdf"
# On-disk location of the persistent Chroma vector store.
DB_PATH = "./nutuk_chroma_db"
# Target maximum chunk length, in characters.
CHUNK_SIZE = 1000
# Approximate number of trailing characters repeated between consecutive chunks.
OVERLAP = 100
# NOTE(review): MAX_TURNS is defined but never referenced in this file —
# presumably intended for Runner.run(max_turns=...); confirm before removing.
MAX_TURNS = 10
21
+
22
def clean_text(text):
    """Normalize raw PDF page text.

    Rejoins words hyphenated across line breaks, flattens newlines to
    spaces, and collapses runs of whitespace into single spaces.
    """
    # Words split with a hyphen at a line break are a common PDF artifact.
    dehyphenated = re.sub(r'(\w+)-\s*\n(\w+)', r'\1\2', text)
    # Flatten remaining newlines, then squeeze repeated whitespace.
    flattened = dehyphenated.replace('\n', ' ')
    return re.sub(r'\s+', ' ', flattened).strip()
34
+
35
def chunk_text(text, chunk_size=1000, overlap=100):
    """Split *text* into chunks of at most *chunk_size* characters.

    Sentences are kept intact where possible (NLTK Turkish sentence
    tokenization handles abbreviations such as "Gen.", "Prof."), and
    roughly *overlap* characters of trailing sentences are repeated at the
    start of the next chunk to preserve context across boundaries.

    Args:
        text: Cleaned input text.
        chunk_size: Maximum chunk length in characters.
        overlap: Approximate number of characters shared between chunks.

    Returns:
        list[str]: The ordered chunks.
    """
    sentences = sent_tokenize(text, language='turkish')

    chunks = []
    current_chunk = []
    current_length = 0

    for sentence in sentences:
        sentence_len = len(sentence)

        # A single sentence longer than chunk_size cannot be grouped: flush
        # the pending chunk, then hard-split the whole sentence so no text
        # is lost (previously only the first chunk_size characters were
        # kept and the remainder was silently dropped).
        if sentence_len > chunk_size:
            if current_chunk:
                chunks.append(" ".join(current_chunk))
                current_chunk = []
                current_length = 0
            for start in range(0, sentence_len, chunk_size):
                chunks.append(sentence[start:start + chunk_size])
            continue

        if current_length + sentence_len > chunk_size:
            chunks.append(" ".join(current_chunk))

            # Carry the last few sentences (up to ~overlap characters)
            # into the next chunk so context is not cut at the boundary.
            overlap_len = 0
            new_start = []
            for s in reversed(current_chunk):
                if overlap_len + len(s) < overlap:
                    new_start.insert(0, s)
                    overlap_len += len(s)
                else:
                    break
            current_chunk = new_start
            current_length = overlap_len

        current_chunk.append(sentence)
        current_length += sentence_len

    if current_chunk:
        chunks.append(" ".join(current_chunk))

    return chunks
81
+
82
def rag_pipeline():
    """Build the persistent Chroma index from the Nutuk PDF.

    Reads PDF_PATH page by page, cleans and chunks the text, embeds each
    chunk with a Turkish sentence-transformer, and stores everything in
    the "nutuk_collection" collection under DB_PATH.
    """
    encoder = SentenceTransformer("selmanbaysan/turkish_embedding_model_fine_tuned")
    client = chromadb.PersistentClient(path=DB_PATH)
    collection = client.get_or_create_collection(name="nutuk_collection")

    print("Reading and Cleaning PDF...")
    reader = PdfReader(PDF_PATH)

    documents, metadatas, ids = [], [], []

    # 1-based page numbers so citations match how readers reference the book.
    for page_no, page in enumerate(reader.pages, start=1):
        raw_text = page.extract_text()
        if not raw_text:
            continue

        for chunk in chunk_text(clean_text(raw_text), chunk_size=CHUNK_SIZE, overlap=OVERLAP):
            # Sequential ids: len(ids) before append yields 0, 1, 2, ...
            ids.append(f"id_{len(ids)}")
            documents.append(chunk)
            metadatas.append({"source": "Nutuk", "page": page_no})

    print(f"Generated {len(documents)} chunks.")

    # Embed and insert in batches to keep memory bounded.
    batch_size = 100
    for start in range(0, len(documents), batch_size):
        stop = start + batch_size
        doc_batch = documents[start:stop]
        collection.add(
            embeddings=encoder.encode(doc_batch).tolist(),
            documents=doc_batch,
            metadatas=metadatas[start:stop],
            ids=ids[start:stop],
        )
    print(f"Database saved to {DB_PATH}")
125
+
126
def testing_rag(query):
    """Ad-hoc smoke test: print the top-5 chunks retrieved for *query*."""
    client = chromadb.PersistentClient(path=DB_PATH)
    # Must be the same model used to build the index, or distances are meaningless.
    encoder = SentenceTransformer("selmanbaysan/turkish_embedding_model_fine_tuned")  # model finetuned on Turkish datasets
    collection = client.get_collection(name="nutuk_collection")

    hits = collection.query(
        query_embeddings=[encoder.encode(query).tolist()],
        n_results=5,
    )

    print("--- Retrieving Context ---")
    for doc, meta in zip(hits['documents'][0], hits['metadatas'][0]):
        print(f"[Page {meta['page']}]: {doc}" + "\n\n")
139
+
140
async def chat(message, history):
    """Gradio chat handler: retrieve Nutuk context for *message*, then run a
    web-search-capable agent with the context-augmented system prompt.

    Args:
        message: The user's new message (plain text).
        history: Prior turns as Gradio "messages"-style dicts, each with
            "role" and "content" keys.

    Returns:
        The agent's final answer as a string.
    """
    # NOTE(review): the embedding model and Chroma client are re-created on
    # every message; consider caching them at module level to cut latency.
    client = chromadb.PersistentClient(path=DB_PATH)
    encoder = SentenceTransformer("selmanbaysan/turkish_embedding_model_fine_tuned")
    collection = client.get_collection(name="nutuk_collection")

    query_embedding = encoder.encode(message).tolist()
    results = collection.query(query_embeddings=[query_embedding], n_results=5)

    print("--- Retrieving Context ---")
    retrieved_context = ""
    for i, doc in enumerate(results['documents'][0]):
        page_num = results['metadatas'][0][i]['page']
        retrieved_context += f"[Sayfa {page_num}]: {doc}" + "\n"

    system_prompt = f"""

    Sen, Mustafa Kemal Atatürk'ün ölümsüz eseri "Nutuk" üzerine uzmanlaşmış bir asistansın. Görevin, sana \
    sağlanan metin parçalarını (bağlamı) kullanarak kullanıcı sorularına yanıt vermek.

    Buna ek olarak "Web Search" aracını kullanarak internetteki bilgileri de kullan.
    Arama yaparken sayfa numarası, saat veya çok spesifik metin parçalarını sorguya dahil etme. \
    Sorgularını "olay adı + kişi" gibi genel anahtar kelimelerle oluştur.

    ### Temel İlkelerin:
    1. **Sadakat ve Hiyerarşi:** Yanıtlarını öncelikle sana verilen bağlam (context) içindeki bilgilere dayandır. \
    2. **Üslup:** Resmi, saygılı, net ve Cumhuriyet vizyonuna uygun bir dil kullan. Nutuk'taki olayları anlatırken \
    Atatürk'ün perspektifini yansıt (Örn: "Metne göre, Paşa bu durumu şöyle aktarıyor...").
    3. **Atıf Yapma:** Nutuk metninden aldığın bilgilerin sayfa numarasını mutlaka belirt (Örn: Sayfa 444). Web aramasından \
    gelen bilgiler için ise "Web aramasına göre..." ifadesini kullan.
    4. **Çelişki Yönetimi:** Eğer kullanıcı sorusu, bağlamdaki bilgiler ve web sonuçları çelişiyorsa, Nutuk metnini esas al ve \
    "Nutuk metnine göre durum şöyledir:" diyerek açıkla.

    ### Yanıt Formatı:
    - Yanıtlarını maddeler halinde veya kısa, öz paragraflarla yapılandır.
    - Alıntı yaparken çift tırnak kullan ve kronolojik sırayı takip et.

    Sana sunulan metin parçaları aşağıdadır:
    ---------------------
    {retrieved_context}
    ---------------------
    """

    print(system_prompt)

    # Copy only the keys the agent needs (Gradio may attach extras), then
    # append the new user turn.
    processed_history = [{"role": m['role'], "content": m['content']} for m in history]
    processed_history.append({"role": "user", "content": message})

    # Bug fix: os.getenv returns None when BRAVE_API_KEY is unset, and a
    # None value in a subprocess env mapping raises at spawn time — only
    # forward the key when it is actually set.
    brave_key = os.getenv("BRAVE_API_KEY")
    env = {"BRAVE_API_KEY": brave_key} if brave_key else {}
    params = {"command": "npx", "args": ["-y", "@brave/brave-search-mcp-server"], "env": env}

    async with MCPServerStdio(params=params, client_session_timeout_seconds=30) as mcp_server:
        agent = Agent(
            name="agent",
            instructions=system_prompt,
            model=MODEL_NAME,
            mcp_servers=[mcp_server],
            model_settings=ModelSettings(tool_choice="required"),
        )
        with trace("nutukgpt"):  # tracing and monitoring the agent
            # Bound the agent loop with the module-level MAX_TURNS constant
            # (previously defined but unused).
            result = await Runner.run(agent, processed_history, max_turns=MAX_TURNS)
        return result.final_output
201
+
202
if __name__ == "__main__":
    # One-time index build / manual retrieval check, left for convenience:
    # rag_pipeline()
    # chat("cumhuriyetin ilanı nasıl oldu")
    # Launch the Gradio chat UI; type="messages" matches the role/content
    # dict history that chat() expects.
    gr.ChatInterface(chat, type="messages").launch()
nutuk_chroma_db/0d0636b4-fe1f-4829-9615-74b16866fc29/data_level0.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:759cf584a1c9580bad32c57422fa7be104db5425e65af00a47f48a9a449243bf
3
+ size 3212000
nutuk_chroma_db/0d0636b4-fe1f-4829-9615-74b16866fc29/header.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b58c6fcb8aa76156f8ef447a97afa6faced3ff9db8b05b2fdbcc5c4473480cee
3
+ size 100
nutuk_chroma_db/0d0636b4-fe1f-4829-9615-74b16866fc29/index_metadata.pickle ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2e95ca74c2aff25ef5d046af261a268f2a301c689fb019737a50e9bc140b5b69
3
+ size 31912
nutuk_chroma_db/0d0636b4-fe1f-4829-9615-74b16866fc29/length.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:296a15251ec44d1976151fc5d9d1d4df2712c1e41bd1ef4399e5dabf5a4e60e2
3
+ size 4000
nutuk_chroma_db/0d0636b4-fe1f-4829-9615-74b16866fc29/link_lists.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8a5419fb5ccd33b4f85624ef097b85e88723d6defa832e9e969574d08ea7508d
3
+ size 8624
nutuk_chroma_db/chroma.sqlite3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ca03670fbe24171c9b8daf927591d60b81c3e26a592308c7c7ce5862b8286e9d
3
+ size 12804096
requirements.txt ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ requests
2
+ python-dotenv
3
+ gradio
4
+ pypdf
5
+ openai
6
+ chromadb
7
+ nltk
8
+ sentence-transformers
9
+ numpy
10
+ openai-agents
11
+ pysqlite3-binary