# MedAI — Streamlit app: LlamaCpp-backed Q&A chatbot with a profanity filter
# and a sidebar "Kendra Locator" (pin-code lookup against location.csv).
import re

import pandas as pd
import streamlit as st
from langchain_community.llms import LlamaCpp
from langchain_core.callbacks import StreamingStdOutCallbackHandler
from langchain_core.prompts import PromptTemplate
# st.set_page_config must be the first Streamlit command executed on the page.
st.set_page_config(layout="centered", initial_sidebar_state="expanded")

# Session-state defaults so values survive Streamlit reruns.
if 'selected_service' not in st.session_state:
    st.session_state.selected_service = "Kendra Locator"
if 'user_input' not in st.session_state:
    st.session_state['user_input'] = ''


@st.cache_data
def _load_locations(path="location.csv"):
    """Load the Kendra location table once and cache it across reruns.

    The original code re-read the CSV on every widget interaction.
    """
    return pd.read_csv(path, encoding='Windows-1252')


df = _load_locations()

# ---------------------------------------------------------------
# Sidebar: Kendra Locator — look up a centre by pin code.
# ---------------------------------------------------------------
st.sidebar.title("KENDR LOCATOR")
st.sidebar.write("Find One Near You!")
display_option = st.sidebar.selectbox("Select:", ["Address", "Email"])
pin_code_input = st.sidebar.text_input("Enter Pin Code:")
if st.sidebar.button("Locate"):
    # Strip whitespace so accidental spaces don't break the exact match.
    pin_code = pin_code_input.strip()
    if pin_code:
        result = df[df['Pin'].astype(str) == pin_code]
        if not result.empty:
            st.sidebar.write(f"**Name**: {result['Name'].values[0]}")
            if display_option == "Address":
                st.sidebar.write(f"**Address**: {result['Address'].values[0]}")
            elif display_option == "Email":
                st.sidebar.write(f"**Email**: {result['Email'].values[0]}")
        else:
            st.sidebar.write("No results found.")
    else:
        st.sidebar.write("Please enter a pin code.")
@st.cache_resource
def _load_llm():
    """Create the LlamaCpp model once and reuse it across Streamlit reruns.

    Without caching, the (expensive) GGUF model was reloaded from disk on
    every user interaction with the page.
    """
    return LlamaCpp(
        model_path="model.gguf",
        temperature=0.3,
        max_tokens=512,
        top_p=1,
        callbacks=[StreamingStdOutCallbackHandler()],
        verbose=False,
        stop=["###"],
    )


llm = _load_llm()

# Prompt contract: the model answers after "### Response:" and generation
# stops at the next "###" (see the `stop` list above).
template = """You are a knowledgeable, polite, and contextually aware assistant. Your goal is to provide accurate, relevant, and easy-to-understand answers to users' questions. Always maintain a professional tone and avoid providing offensive or inappropriate responses.
### Question:
{input}
### Response:
{response}"""
prompt = PromptTemplate.from_template(template)
# Blocklist of profane words/phrases (English + Hinglish) used to screen
# user input before it reaches the model.
PROFANE_WORDS = [
    "damn", "shit", "fuck", "bitch", "asshole", "dick", "piss", "crap", "cunt",
    "twat", "slut", "whore", "faggot", "nigger", "kike", "chink", "gook", "spic",
    "dyke", "suck", "cock", "pussy", "motherfucker", "bastard", "prick", "wanker",
    "bollocks", "arse", "bloody", "bugger", "tosser", "git", "slag", "pillock",
    "knob", "knobhead", "wazzock", "clit", "scrotum", "fanny", "ass", "freak",
    "bimbo", "dumbass", "jackass", "wimp", "idiot", "moron", "loser", "fool",
    "retard", "cocksucker", "shag", "shagger", "piss off", "go to hell",
    "hell", "dammit", "son of a bitch", "jerk", "puke", "chut", "chutiyah",
    "bhosdike", "bhenchod", "madarchod", "gandu", "gand", "bhancho",
    "saala", "kameena", "bhenji", "bhadwa", "kothi", "aankhmar", "launda",
    "bhikari", "sala", "billi", "bhosdika", "kothi", "sundar", "langda",
    "kaamchor", "gaddha", "bakra", "chudiya", "gando", "bhencod", "lanat",
    "bhoot", "chakkar", "chutak", "haramkhor", "bandar", "banda", "bakwas",
    "nikamma", "pagal", "nalayak", "pagal", "khota", "madharchod"
]

# Pre-compiled, word-boundary-anchored pattern. The original substring test
# ("ass" in text) flagged innocent words such as "class", "hello" ("hell"),
# and even the assistant's own vocabulary ("assistant" contains "ass").
_PROFANITY_RE = re.compile(
    r"\b(?:" + "|".join(re.escape(word) for word in PROFANE_WORDS) + r")\b",
    re.IGNORECASE,
)


def contains_profanity(text):
    """Return True if *text* contains any blocklisted word or phrase.

    Matching is case-insensitive and anchored on word boundaries, so only
    whole words/phrases (including multi-word entries like "piss off") are
    flagged, not substrings of harmless words.
    """
    return _PROFANITY_RE.search(text) is not None
def truncate_at_full_stop(text, max_length=512):
    """Trim *text* to at most *max_length* chars, cutting at a sentence end.

    Returns *text* unchanged when it already fits. Otherwise returns the
    prefix up to and including the last '.' within the first *max_length*
    characters, falling back to a hard cut at *max_length* when no '.' is
    present. (Leftover debug ``print`` calls from the original were removed.)
    """
    if len(text) <= max_length:
        return text
    clipped = text[:max_length]
    cut = clipped.rfind('.')
    # No sentence boundary inside the window: hard-truncate.
    return clipped[:cut + 1] if cut != -1 else clipped
# ---------------------------------------------------------------
# Main panel: MedAI Q&A chat (shown for the default service).
# ---------------------------------------------------------------
if st.session_state.selected_service == "Kendra Locator":
    st.title("MedAI")
    user_input = st.text_input("Your Queries:", key='temp_user_input')
    if st.button("Ask Away"):
        if user_input:
            if contains_profanity(user_input):
                st.markdown("<span style='color: red;'>Mind The Language Dear!</span>", unsafe_allow_html=True)
            else:
                formatted_prompt = prompt.format(
                    input=user_input,
                    response=""
                )
                response = llm.invoke(formatted_prompt)
                truncated_response = truncate_at_full_stop(response)
                # Append a terminating full stop only when missing — the
                # original always added one, rendering ".." whenever the
                # truncated reply already ended at a sentence boundary.
                if truncated_response and not truncated_response.endswith('.'):
                    truncated_response += '.'
                st.markdown(f"**You:** {user_input}", unsafe_allow_html=False)
                st.markdown(f"**MedAI:** {truncated_response}", unsafe_allow_html=False)
st.warning("Developer's notice : Responses are generated by AI and maybe inaccurate or inappropriate. Any received medical or financial consult is not a substitute for professional advice.")