# NOTE(review): removed "Spaces: Sleeping" status lines — scrape artifact from
# the hosting page, not part of the source and invalid Python.
"""
LLM Interface Module
====================

Manages communication with the OpenAI API and supports function calling.

This module provides:
- Context-grounded prompt construction
- Integration with the OpenAI API
- Support for function (tool) calls
- Response processing and control

In the RAG system this module:
- Passes the context found by the retriever to the LLM correctly
- Forces the model to answer only from information inside the document
"""
import os
import json
from typing import List, Dict, Any, Optional

from openai import OpenAI

from .tools import RAGTools

# SECURITY: never hard-code an API key in source code. The key that used to be
# assigned to os.environ here has been removed (and must be revoked — it was
# committed to version control). Set OPENAI_API_KEY in the environment instead,
# e.g. via a .env file or the deployment's secret store.
class LLMInterface:
    """
    Interface for talking to an OpenAI LLM, with function-calling support.

    Responsibilities:
    - Build a prompt from the retrieved context and the user's question
    - Send requests to the OpenAI API
    - Execute tool (function) calls requested by the model
    - Force the model to answer ONLY from the supplied context

    Example:
        llm = LLMInterface()
        response = llm.generate(context, question)

    This class is the "brain" of the RAG pipeline.
    """

    # System prompt that restricts the model to the provided context only.
    SYSTEM_PROMPT = """You are a helpful assistant that answers questions based ONLY on the provided context.
IMPORTANT RULES:
1. Only use information from the provided context to answer questions
2. If the answer cannot be found in the context, respond exactly: "The answer is not found in the document"
3. Do not make up information or use knowledge outside the context
4. Be concise and accurate in your responses
5. When using tool results, incorporate them naturally into your response
6. Always cite which part of the context your answer comes from when possible"""

    def __init__(
        self,
        api_key: Optional[str] = None,
        model: str = "gpt-4o-mini"
    ):
        """
        Initialize the LLM interface.

        Args:
            api_key:
                OpenAI API key. If not given, it is read from the
                OPENAI_API_KEY environment variable.
            model:
                Name of the OpenAI model to use.

        Raises:
            ValueError: If no API key is available from either source.
        """
        # SECURITY FIX: the previous version hard-coded a leaked API key here
        # (both as a fallback value and, mistakenly, as the parameter's type
        # annotation, which also left `api_key` without a default so
        # `LLMInterface()` raised TypeError instead of ValueError).
        # Keys must come from the caller or the environment only.
        self.api_key = api_key or os.getenv("OPENAI_API_KEY")
        if not self.api_key:
            raise ValueError(
                "OpenAI API kaliti talab qilinadi. "
                "OPENAI_API_KEY environment variable’ni o‘rnating yoki api_key yuboring."
            )
        # Create the OpenAI client.
        self.client = OpenAI(api_key=self.api_key)
        self.model = model
        # Tool manager used for RAG function calling.
        self.tools = RAGTools()

    def build_prompt(self, context: str, question: str) -> str:
        """
        Build the user prompt from the context and the question.

        Args:
            context:
                Context retrieved from the document (retriever output).
            question:
                The user's question.

        Returns:
            The assembled prompt text, instructing the model to answer
            strictly from the given context.
        """
        return f"""Context from document:
---
{context}
---
Question: {question}
Please answer based ONLY on the context provided above."""

    def generate(
        self,
        context: str,
        question: str,
        use_tools: bool = False,
        chunks: Optional[List[Dict]] = None
    ) -> str:
        """
        Generate an answer with the LLM.

        Works both for plain RAG and for tool-augmented RAG.

        Args:
            context:
                Document context found by the retriever.
            question:
                The user's question.
            use_tools:
                Whether function calling is enabled.
            chunks:
                Chunk data the tools may use (optional).

        Returns:
            The final answer returned by the model.
        """
        # Give the tools access to the current context.
        self.tools.set_context(context, chunks)

        # Conversation messages for the chat API.
        messages = [
            {"role": "system", "content": self.SYSTEM_PROMPT},
            {"role": "user", "content": self.build_prompt(context, question)}
        ]

        # OpenAI API parameters; low temperature keeps answers factual.
        params = {
            "model": self.model,
            "messages": messages,
            "temperature": 0.1,
            "max_tokens": 1000
        }

        # Advertise the tools only when function calling is enabled.
        if use_tools:
            params["tools"] = self.tools.get_tool_definitions()
            params["tool_choice"] = "auto"

        response = self.client.chat.completions.create(**params)
        message = response.choices[0].message

        # If the model requested tool calls, execute them and re-query.
        if message.tool_calls:
            return self._handle_tool_calls(messages, message, context, question)

        return message.content

    def _handle_tool_calls(
        self,
        messages: List[Dict],
        assistant_message,
        context: str,
        question: str
    ) -> str:
        """
        Execute the tool (function) calls requested by the LLM.

        Steps:
        1. Execute each tool call.
        2. Append the results to the conversation.
        3. Ask the LLM again for the final answer.

        Args:
            messages:
                Current conversation messages (mutated in place).
            assistant_message:
                Assistant message containing the tool calls.
            context:
                Document context (unused here, kept for signature stability).
            question:
                The original question (unused here, kept for signature stability).

        Returns:
            The final answer from the model.
        """
        # Echo the assistant message (with its tool calls) into the history.
        messages.append({
            "role": "assistant",
            "content": assistant_message.content,
            "tool_calls": [
                {
                    "id": tc.id,
                    "type": "function",
                    "function": {
                        "name": tc.function.name,
                        "arguments": tc.function.arguments
                    }
                }
                for tc in assistant_message.tool_calls
            ]
        })

        # Execute each requested tool and append its result.
        for tool_call in assistant_message.tool_calls:
            tool_name = tool_call.function.name
            try:
                arguments = json.loads(tool_call.function.arguments)
            except json.JSONDecodeError:
                # Malformed arguments from the model — run the tool with none.
                arguments = {}

            print(f" [Tool Call] {tool_name}({arguments})")
            result = self.tools.execute_tool(tool_name, arguments)
            print(
                f" [Tool Result] {result[:100]}..."
                if len(result) > 100 else f" [Tool Result] {result}"
            )

            messages.append({
                "role": "tool",
                "tool_call_id": tool_call.id,
                "content": result
            })

        # Second round-trip: let the model compose the final answer
        # from the tool results.
        final_response = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            temperature=0.1,
            max_tokens=1000
        )
        return final_response.choices[0].message.content

    def generate_simple(self, context: str, question: str) -> str:
        """
        Generate an answer without tools. Convenient for plain Q&A.

        Args:
            context:
                Context found by the retriever.
            question:
                The question.

        Returns:
            The LLM's answer.
        """
        return self.generate(context, question, use_tools=False)

    def generate_with_tools(
        self,
        context: str,
        question: str,
        chunks: Optional[List[Dict]] = None
    ) -> str:
        """
        Generate an answer with function calling enabled.

        Args:
            context:
                The context.
            question:
                The question.
            chunks:
                Chunks the tools may use (optional).

        Returns:
            The LLM's answer.
        """
        return self.generate(context, question, use_tools=True, chunks=chunks)
# Example usage (smoke test) — only works when OPENAI_API_KEY is set.
if __name__ == "__main__":
    try:
        interface = LLMInterface()
        sample_context = """
    Python is a high-level programming language known for its readability.
    It was created by Guido van Rossum and first released in 1991.
    Python supports multiple programming paradigms including procedural,
    object-oriented, and functional programming.
    """
        answer = interface.generate_simple(
            sample_context,
            "Who created Python?"
        )
        print(f"Javob: {answer}")
    except ValueError as err:
        print(f"Testni bajarib bo‘lmadi: {err}")