# RAG demo: indexes Project.txt into a Chroma vector store and answers
# questions through a Groq-backed LangChain RetrievalQA chain, served via Gradio.
# --- Dependencies and document ingestion -------------------------------------
# NOTE(review): `langchain.document_loaders` is a deprecated import path;
# TextLoader now lives in `langchain_community.document_loaders`.
import os
from typing import List, Optional

from groq import Groq
from langchain.chains import RetrievalQA
from langchain.llms.base import LLM
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma

# Load the project description and split it into overlapping chunks so that
# retrieval returns focused passages instead of the whole document.
loader = TextLoader("./Project.txt")
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
docs = text_splitter.split_documents(documents)

# Embed each chunk and persist the vector index to disk so it can be reused
# across runs without re-embedding.
embedding = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
vectorstore = Chroma.from_documents(docs, embedding, persist_directory="rag_chroma_groq")
class GroqLLM(LLM):
    """Minimal LangChain LLM wrapper around the Groq chat-completions API."""

    model: str = "llama3-8b-8192"
    # SECURITY: never commit real API keys to source. The previously
    # hard-coded key is leaked and must be revoked; read from the
    # environment instead.
    api_key: str = os.environ.get("GROQ_API_KEY", "")
    temperature: float = 0.0

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        """Send *prompt* to the Groq chat API and return the reply text.

        `stop` is accepted for LangChain interface compatibility but is not
        forwarded to the API (matching the original behavior).
        """
        client = Groq(api_key=self.api_key)
        messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt},
        ]
        response = client.chat.completions.create(
            model=self.model,
            messages=messages,
            temperature=self.temperature,
        )
        return response.choices[0].message.content

    @property
    def _llm_type(self) -> str:
        """Identifier LangChain uses for logging/serialization.

        Must be a property: the LLM base class declares `_llm_type` as an
        abstract property, and LangChain reads `self._llm_type` expecting a
        string — a plain method here would leak a bound method instead.
        """
        return "groq-llm"
# --- Retrieval-augmented QA chain --------------------------------------------
retriever = vectorstore.as_retriever()

# SECURITY: the API key must come from the environment, never from source.
# Fails fast with a KeyError if GROQ_API_KEY is unset. The previously
# committed key is compromised and must be revoked.
groq_llm = GroqLLM(api_key=os.environ["GROQ_API_KEY"])

qa_chain = RetrievalQA.from_chain_type(
    llm=groq_llm,
    retriever=retriever,
    return_source_documents=True,
)

# Smoke-test the chain once at startup. `.invoke` replaces the deprecated
# `chain({...})` __call__ style.
query = "Explain the whole project in points and sections"
result = qa_chain.invoke({"query": query})
print("Answer:", result["result"])
import gradio as gr


def answer_query(query: str) -> str:
    """Run one retrieval-augmented query and return only the answer text.

    Relies on the module-level `qa_chain` built above. `.invoke` replaces
    the deprecated `chain({...})` __call__ style.
    """
    result = qa_chain.invoke({"query": query})
    return result["result"]


# Single-textbox UI in front of the QA chain.
interface = gr.Interface(
    fn=answer_query,
    inputs=gr.Textbox(lines=2, placeholder="Ask me anything about the project..."),
    outputs="text",
    title="🧠 Project Summariser",
    description="Ask questions based on my projects",
)

# Launch the web server (blocks until stopped).
interface.launch()