File size: 2,996 Bytes
1455b2b
13aaf2e
1455b2b
 
 
 
 
aa235e2
8165700
 
13aaf2e
aa235e2
 
 
 
13aaf2e
aa235e2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13aaf2e
aa235e2
1455b2b
aa235e2
 
1455b2b
aa235e2
1455b2b
8165700
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13aaf2e
aa235e2
 
 
 
8165700
 
 
aa235e2
 
 
 
 
8165700
 
 
 
aa235e2
 
13aaf2e
aa235e2
 
 
 
 
64324d8
aa235e2
 
13aaf2e
aa235e2
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
import os
import gradio as gr
from huggingface_hub import hf_hub_download
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_groq import ChatGroq
from langchain_community.document_loaders import PyPDFLoader
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain

# Configuration
# Hugging Face Hub dataset holding the source documentation PDF.
HF_REPO_ID = "Shami96/7solar-documentation"
HF_PDF_NAME = "7solar_documentation.pdf"
# May be None when unset; hf_hub_download then only works for public repos.
HF_TOKEN = os.environ.get("HF_TOKEN")

# Initialize components
def initialize_components():
    """Build and return the conversational retrieval QA chain.

    Downloads the documentation PDF from the Hugging Face Hub, splits it
    into overlapping chunks, embeds the chunks into an in-memory Chroma
    vector store, and wires a Groq-hosted LLM plus conversation memory
    into a ConversationalRetrievalChain.

    Returns:
        A ConversationalRetrievalChain ready to answer questions.

    Raises:
        RuntimeError: if the PDF cannot be downloaded or parsed.
    """
    print("⚙️ Initializing components...")

    # Fetch and parse the source PDF; wrap any failure in a single RuntimeError.
    try:
        local_pdf = hf_hub_download(
            repo_id=HF_REPO_ID,
            filename=HF_PDF_NAME,
            repo_type="dataset",
            token=HF_TOKEN,
        )
        pages = PyPDFLoader(local_pdf).load()
    except Exception as e:
        raise RuntimeError(f"Failed to load PDF: {str(e)}")

    # Chunk the pages (1000 chars, 200 overlap) and index them in Chroma.
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    store = Chroma.from_documents(
        splitter.split_documents(pages),
        HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2"),
    )

    # Groq-hosted Llama 3; low temperature biases toward factual answers.
    chat_model = ChatGroq(model_name="llama3-70b-8192", temperature=0.3)

    # Memory keyed the way ConversationalRetrievalChain expects.
    chat_memory = ConversationBufferMemory(
        memory_key="chat_history",
        return_messages=True,
    )

    # "stuff" = concatenate all retrieved chunks into one prompt.
    return ConversationalRetrievalChain.from_llm(
        llm=chat_model,
        retriever=store.as_retriever(),
        memory=chat_memory,
        chain_type="stuff",
    )

# Chat function
def respond(message, history):
    """Answer a user chat message via the retrieval chain.

    Args:
        message: The user's question, as a string from the Gradio chat box.
        history: Prior chat turns supplied by gr.ChatInterface (unused here;
            conversational context lives in the chain's own memory).

    Returns:
        The assistant's reply as a plain string; on any failure a
        human-readable error message is returned instead of raising,
        so the UI keeps working.
    """
    # Declared once at function top (a conditional `global` is legal but
    # misleading — the declaration always applies to the whole function).
    global qa_chain
    try:
        # Handle canned greetings BEFORE lazy initialization so a plain
        # "hi" does not trigger the expensive one-time PDF download and
        # vector-store build. strip() tolerates stray whitespace ("hi ").
        if message.strip().lower() in ("hi", "hello", "hey"):
            return "Hello! I'm your 7Solar assistant. How can I help you today?"

        # Lazy one-time initialization of the retrieval chain.
        # NOTE(review): not thread-safe — concurrent first requests could
        # initialize twice; acceptable for a single-user demo Space.
        if "qa_chain" not in globals() or qa_chain is None:
            qa_chain = initialize_components()

        # The chain carries its own ConversationBufferMemory, so only the
        # new question is passed in.
        result = qa_chain({"question": message})
        return result["answer"]

    except Exception as e:
        return f"An error occurred: {str(e)}"

# Create Gradio interface
# ChatInterface wires `respond` into a ready-made chat UI. The examples are
# shown as clickable suggestions; cache_examples=False means they are not
# pre-computed at startup (which would force chain initialization).
demo = gr.ChatInterface(
    fn=respond,
    title="☀️ 7Solar Assistant",
    description="Ask me anything about 7Solar's services and documentation",
    examples=["What is 7Solar.pk?", "How does the registration process work?"],
    cache_examples=False
)

# Launch with error handling
if __name__ == "__main__":
    try:
        # 0.0.0.0:7860 is the conventional bind address/port for hosted
        # Gradio apps (e.g. Hugging Face Spaces).
        demo.launch(server_name="0.0.0.0", server_port=7860)
    except Exception as e:
        # Surface the failure in logs, then re-raise so the process exits non-zero.
        print(f"Failed to launch: {str(e)}")
        raise