"""Streamlit front-end for a RAG-based prompt enhancer.

Combines a persistent Chroma vector store, LangChain document loading and
splitting, and a prompt template that enriches a user prompt with retrieved
context. The frontend forwards work to a backend at http://localhost:8000.
"""

# Imports — grouped stdlib-free third-party block (requests, streamlit,
# chromadb, langchain), one import per line.
import requests
import streamlit as st
from chromadb import Client, Settings
from langchain.document_loaders import PyPDFLoader, TextLoader
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma


def init_vector_store():
    """Create a Chroma vector store backed by a persistent local client.

    Returns:
        Chroma: LangChain vector store using SentenceTransformer embeddings,
        persisted under ./chroma_db.
    """
    # BUG FIX: SentenceTransformerEmbeddings is pydantic-based and only
    # accepts keyword arguments; the original passed the model positionally.
    embeddings = SentenceTransformerEmbeddings(
        model_name="paraphrase-MiniLM-L6-v2"
    )
    client = Client(Settings(persist_directory="./chroma_db"))
    # BUG FIX: the Chroma constructor's parameter is `embedding_function`,
    # not `embeddings` — the original keyword would raise a TypeError.
    return Chroma(client=client, embedding_function=embeddings)


def process_documents(file_path):
    """Load a .txt or .pdf file and split it into overlapping chunks.

    Args:
        file_path: Path to the document; the extension selects the loader
            (.txt -> TextLoader, anything else -> PyPDFLoader).

    Returns:
        list: LangChain Document chunks (~1000 chars each, 100-char overlap).
    """
    # BUG FIX: both loaders require the path at construction time; the
    # original called TextLoader()/PyPDFLoader() with no arguments and
    # never used `file_path`.
    if file_path.endswith(".txt"):
        loader = TextLoader(file_path)
    else:
        loader = PyPDFLoader(file_path)

    splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=100,
    )
    docs = loader.load()
    return splitter.split_documents(docs)


class PromptOptimizer:
    """Wraps a PromptTemplate that merges retrieved context into a prompt."""

    def __init__(self):
        # BUG FIX: added the missing newline after "provided." — the
        # original concatenation glued "Context:" onto the instruction
        # sentence with no separator.
        self.base_template = PromptTemplate(
            input_variables=["context", "prompt"],
            template=(
                "Use the following context to enhance the prompt provided.\n"
                "Context: {context}\n"
                "Prompt: {prompt}\n"
                "Generate an enhanced prompt that leverages the context "
                "provided while maintaining the original intent of the prompt."
            ),
        )

    def optimize_prompt(self, context, prompt):
        """Return the template filled in with *context* and *prompt*.

        Args:
            context: Retrieved context text to ground the enhancement.
            prompt: The user's original prompt.

        Returns:
            str: The fully formatted enhancement prompt.
        """
        # BUG FIX: PromptTemplate has no `render` method; `format` is the
        # documented API for substituting input variables.
        return self.base_template.format(context=context, prompt=prompt)


# ---------------------------------------------------------------------------
# Streamlit frontend
# ---------------------------------------------------------------------------
st.title("RAG-based Prompt Enhancer")

# File upload: forward the raw file to the backend ingestion endpoint.
uploaded_file = st.file_uploader("Choose a file")
if uploaded_file:
    files = {"file": uploaded_file}
    upload_response = requests.post("http://localhost:8000/upload", files=files)
    if upload_response.ok:
        st.success("File uploaded.")
    else:
        st.error(f"Upload failed with status {upload_response.status_code}.")

prompt = st.text_area("Enter a prompt you'd like to enhance:")
if st.button("Enhance Prompt"):
    # BUG FIX: the original reused the /upload response here, which (a) is a
    # NameError when no file was uploaded and (b) never sends the prompt to
    # the backend at all. POST the prompt to the enhancement endpoint instead.
    # NOTE(review): endpoint path and payload shape assumed — confirm against
    # the backend API.
    response = requests.post(
        "http://localhost:8000/enhance",
        json={"prompt": prompt},
    )
    if response.ok:
        st.write("Enhanced Prompt:")
        st.write(response.json()["enhanced_prompt"])
    else:
        st.error(f"Enhancement failed with status {response.status_code}.")