# LexifyAI — Streamlit RAG chatbot (Hugging Face Space)
# --- Imports & environment shims --------------------------------------------
import os
import sys
import warnings

# Chroma requires sqlite3 >= 3.35; on images with an older system sqlite3
# (e.g. the default HF Spaces base image), substitute the bundled pysqlite3
# build. NOTE(review): the swap must happen before chromadb is first imported;
# Chroma only imports chromadb lazily at instantiation, so doing it up here,
# before any langchain imports, is the safe ordering.
try:
    __import__("pysqlite3")
    sys.modules["sqlite3"] = sys.modules.pop("pysqlite3")
except ImportError:
    # pysqlite3 not installed (e.g. local dev on a machine with a recent
    # sqlite3) — fall back to the stdlib module instead of crashing.
    pass

import streamlit as st
from dotenv import load_dotenv
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
from langchain_community.llms import HuggingFaceHub
from langchain_community.vectorstores import Chroma
from langchain_huggingface import HuggingFaceEmbeddings

# Silence noisy deprecation chatter from the langchain stack.
warnings.filterwarnings("ignore")
# --- Configuration & vector store --------------------------------------------
# Load environment variables (HF token, etc.) from a local .env file.
load_dotenv()

# The persisted Chroma index lives next to this script under ./data.
data_directory = os.path.join(os.path.dirname(__file__), "data")

# BUG FIX: the old code did os.environ[...] = os.getenv(...), which raises
# TypeError when the variable is unset (os.environ values must be str).
# Fail fast with a clear message instead.
hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
if not hf_token:
    st.error("HUGGINGFACEHUB_API_TOKEN is not set. Add it to your .env file "
             "or the Space secrets.")
    st.stop()
os.environ["HUGGINGFACEHUB_API_TOKEN"] = hf_token

# Load the vector store from disk, using the same embedding model the index
# was built with (mismatched models would make retrieval meaningless).
embedding_model = HuggingFaceEmbeddings(
    model_name="sentence-transformers/all-MiniLM-L6-v2"
)
vector_store = Chroma(
    embedding_function=embedding_model,
    persist_directory=data_directory,
)
# --- LLM ---------------------------------------------------------------------
# Hosted inference through the Hugging Face Hub.
# NOTE(review): HuggingFaceHub is deprecated in recent langchain releases;
# consider migrating to HuggingFaceEndpoint when upgrading dependencies.
hf_hub_llm = HuggingFaceHub(
    repo_id="meta-llama/Meta-Llama-3-8B-Instruct",
    model_kwargs={
        "temperature": 1,        # high creativity; lower for steadier answers
        "max_new_tokens": 1024,  # cap on generated length
    },
)
# --- Prompt ------------------------------------------------------------------
# "stuff"-chain prompt: {context} is filled with the retrieved documents and
# {question} with the user's query. The trailing "Answer:" marker is what
# get_response() later splits on to strip the echoed prompt.
prompt_template = """
You are an AI chatbot specializing in the domain of law,
focusing on the recent changes made by the Indian government on July 1 2024 from the old THE INDIAN PENAL CODE(IPC) law to the new Bharatiya Nyaya Sanhita(BNS) law, 2023.
Your task is to provide information about this transition.
Here are your specific instructions:
1. **Simple Definitions**: Provide a brief, easy-to-understand definition of the BNS law for the general public.
2. **Codes Comparison**: Share the sections and clauses for both the IPC and the BNS, highlighting the changes.
3. **Punishments and Revisions**: Detail the punishments, penalties, and any improvements or revisions made in the BNS law.
4. **Detailed Comparison**: Conduct a comprehensive comparison between the IPC and the BNS.
5. **Articles and Videos**: Include references to relevant articles and videos discussing the new BNS law from authoritative sources.
Ensure the information is accurate, concise, and accessible to users with varying levels of legal knowledge.
Now, when the user interacts with you by saying 'hi', 'hello', or 'how are you', respond in an interactive manner to engage them effectively in a single line strictly.
Do not call yourself as chatbot, call yourself as Lexify.
If user asks about the number of Laws in IPC and BNS answer the given text as "The BNS has 356 sections, whereas the IPC had 511; 175 sections were changed, 8 new ones added, and 22 repealed."
User Query:
{context}
Question: {question}
Answer:
"""

custom_prompt = PromptTemplate(
    template=prompt_template,
    input_variables=["context", "question"],
)
# --- Retrieval-augmented QA chain --------------------------------------------
rag_chain = RetrievalQA.from_chain_type(
    llm=hf_hub_llm,
    chain_type="stuff",  # concatenate all retrieved docs into {context}
    # BUG FIX: as_retriever() has no `top_k` kwarg — the result limit must be
    # passed through search_kwargs, otherwise the default k is used and the
    # intended top-3 restriction is silently ignored.
    retriever=vector_store.as_retriever(search_kwargs={"k": 3}),
    chain_type_kwargs={"prompt": custom_prompt},
)
def get_response(question):
    """Run the RAG chain on *question* and return only the answer text.

    HuggingFaceHub models echo the full prompt back, so everything up to and
    including the first "Answer:" marker is stripped.

    Parameters:
        question: the raw user query string.

    Returns:
        The model's answer with surrounding whitespace removed; if the
        "Answer:" marker is absent, the whole model output is returned.
    """
    result = rag_chain({"query": question})
    response_text = result["result"]
    marker = "Answer:"
    idx = response_text.find(marker)
    if idx == -1:
        # BUG FIX: find() returns -1 when the marker is missing; the old code
        # then sliced from index 6 (-1 + len("Answer:")), silently chopping
        # the start of the reply. Return the output unchanged instead.
        return response_text.strip()
    return response_text[idx + len(marker):].strip()
# --- Streamlit page chrome ---------------------------------------------------
# Remove whitespace from the top of the page and sidebar. The doubled braces
# are literal CSS braces escaped for str.format().
st.markdown(
    """
    <style>
    .appview-container .main .block-container {{
        padding-top: {padding_top}rem;
        padding-bottom: {padding_bottom}rem;
    }}
    </style>""".format(padding_top=1, padding_bottom=1),
    unsafe_allow_html=True,
)

# Page title rendered as raw HTML so the red underline style can be applied.
st.markdown(
    """
    <h3 style='text-align: left; color: black; padding-top: 35px; border-bottom: 3px solid red;'>
    LexifyAI: Your Personal Law Assistant
    </h3>""",
    unsafe_allow_html=True,
)
# --- Sidebar and greeting ----------------------------------------------------
side_bar_message = """
Hi! 👋 I'm here to help you with your Law Queries. What would you like to know or explore?
\nHere are some areas you might be interested in:
1. **IPC Laws**
2. **BNS Laws**
3. **Comparing Both**
4. **And Many More** 🌞
Feel free to ask me anything about Law and Justice!
"""

with st.sidebar:
    st.title('🤖LexifyAI')
    st.markdown(side_bar_message)

# First assistant message shown in a fresh chat session (also used by the
# "Clear Chat" reset below).
initial_message = """
Hi there! I'm your Law and Justice Bot 🤖
Here are some questions you might ask me:\n
⚖️ What is IPC?\n
⚖️ When BNS Law was made?\n
⚖️ On which date BNS was Implemented in the Country?\n
"""
# --- Chat history ------------------------------------------------------------
# Seed the conversation with the greeting on first load. (`in st.session_state`
# is the idiomatic membership test; the old `.keys()` call was redundant.)
if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "assistant", "content": initial_message}]

# Streamlit reruns the whole script on every interaction, so re-render the
# accumulated conversation each pass.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])


def clear_chat_history():
    """Reset the conversation back to the initial greeting message."""
    st.session_state.messages = [{"role": "assistant", "content": initial_message}]


st.button('Clear Chat', on_click=clear_chat_history)
# --- Chat input / response loop ----------------------------------------------
# Record and display the user's message as soon as it is submitted.
if prompt := st.chat_input():
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

# Generate a reply only when the last message is from the user (prevents a
# duplicate answer on reruns where nothing new was typed).
if st.session_state.messages[-1]["role"] != "assistant":
    with st.chat_message("assistant"):
        with st.spinner("Hold on, I'm fetching the latest Legal advice for you..."):
            response = get_response(prompt)
            # The old st.empty() placeholder indirection added nothing —
            # render the response directly.
            st.markdown(response)
    st.session_state.messages.append({"role": "assistant", "content": response})