Spaces: Sleeping
| import streamlit as st | |
| from langchain_community.embeddings import HuggingFaceEmbeddings | |
| from langchain_community.vectorstores import Chroma | |
| from langchain.text_splitter import RecursiveCharacterTextSplitter | |
| from langchain_community.llms import CTransformers | |
| from langchain.chains import RetrievalQA | |
| from langchain.prompts import PromptTemplate | |
| import os | |
| from pathlib import Path | |
| import logging | |
class LocalWebDevRAG:
    """Fully local retrieval-augmented code-generation assistant.

    Combines HuggingFace sentence embeddings, a Chroma vector store, and a
    local CodeLlama GGML model served through ctransformers — no API key or
    network calls at inference time.
    """

    def __init__(self):
        """Set up logging, the embedding model, the LLM, and the vector store.

        Heavy operation: loads model weights from disk on every start and may
        build the vector store on first run.
        """
        self.initialize_logging()
        self.setup_embeddings()
        self.setup_llm()
        self.initialize_vector_store()

    def initialize_logging(self):
        """Configure root logging at INFO and attach a module-named logger."""
        logging.basicConfig(level=logging.INFO)
        self.logger = logging.getLogger(__name__)

    def setup_embeddings(self):
        """Load the all-MiniLM-L6-v2 sentence-transformer on CPU."""
        self.embeddings = HuggingFaceEmbeddings(
            model_name="all-MiniLM-L6-v2",
            model_kwargs={'device': 'cpu'},
        )

    def setup_llm(self):
        """Load the local CodeLlama model and build the QA prompt template.

        Fix: LangChain's ``CTransformers`` wrapper expects generation
        parameters nested under the ``config`` keyword; the original passed
        ``max_new_tokens``/``temperature``/``context_length`` as top-level
        kwargs, which the wrapper does not accept as model fields.
        """
        self.llm = CTransformers(
            model='codellama-7b-instruct.ggmlv3.Q4_K_M.bin',
            model_type='llama',
            config={
                'max_new_tokens': 2048,
                'temperature': 0.7,
                # NOTE(review): context_length == max_new_tokens leaves no
                # token budget for the prompt itself — consider a larger
                # context window if the model build supports one.
                'context_length': 2048,
            },
        )
        self.qa_prompt = PromptTemplate(
            template="""You are an expert web developer. Based on the context and request,
generate production-ready code.
Context: {context}
Question: {question}
Provide a detailed solution with explanations.""",
            input_variables=["context", "question"],
        )

    def initialize_vector_store(self):
        """Create or load the Chroma store, then build the RetrievalQA chain.

        Raises:
            Exception: re-raised after logging if Chroma setup fails.
        """
        try:
            # Presence of the directory is used as the "already built" signal.
            if not Path("chroma_db").exists():
                self.create_new_vector_store()
            else:
                self.vector_store = Chroma(
                    persist_directory="chroma_db",
                    embedding_function=self.embeddings,
                )
                self.logger.info("Loaded existing vector store")
            # "stuff" chain type: all retrieved chunks are concatenated into
            # the single {context} slot of qa_prompt.
            self.qa_chain = RetrievalQA.from_chain_type(
                llm=self.llm,
                chain_type="stuff",
                retriever=self.vector_store.as_retriever(),
                chain_type_kwargs={"prompt": self.qa_prompt},
            )
        except Exception as e:
            self.logger.error(f"Vector store initialization failed: {e}")
            raise

    def create_new_vector_store(self):
        """Seed a new Chroma store from built-in example documents.

        NOTE(review): older LangChain/Chroma versions require an explicit
        ``.persist()`` call for the on-disk copy to be written — verify
        against the installed version, since ``initialize_vector_store``
        relies on the directory existing on the next run.
        """
        # Placeholder corpus; replace with real code examples/documentation.
        documents = [
            "React component best practices...",
            "API security implementations...",
            "Database schema designs...",
            # Add more code examples and documentation
        ]
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=200,
        )
        texts = text_splitter.split_text('\n\n'.join(documents))
        self.vector_store = Chroma.from_texts(
            texts,
            self.embeddings,
            persist_directory="chroma_db",
        )
        self.logger.info("Created new vector store")

    def generate_code(self, description, tech_stack, requirements):
        """Run the RetrievalQA chain over a composed project prompt.

        Args:
            description: free-text project description.
            tech_stack: mapping of layer name to chosen technology
                (interpolated into the prompt via str()).
            requirements: iterable of requested features (interpolated
                via str()).

        Returns:
            dict produced by :meth:`process_response`.

        Raises:
            Exception: re-raised after logging on any generation failure.
        """
        try:
            prompt = f"""
Create a web application with:
Description: {description}
Tech Stack: {tech_stack}
Requirements: {requirements}
Provide:
1. Frontend components
2. Backend API
3. Database schema
4. Setup instructions
"""
            response = self.qa_chain.run(prompt)
            return self.process_response(response)
        except Exception as e:
            self.logger.error(f"Generation failed: {e}")
            raise

    def process_response(self, response):
        """Split the raw LLM response into labelled sections.

        Returns a dict with keys ``frontend``, ``backend``, ``database`` and
        ``setup``. A missing section yields ``""``, except ``setup`` which
        falls back to the whole response (preserves original behaviour).
        """
        def _between(text, start, end):
            # Substring after the first `start` marker, truncated at the
            # first subsequent `end` marker; "" when `start` is absent.
            _, found, tail = text.partition(start)
            if not found:
                return ""
            return tail.partition(end)[0]

        _, setup_found, setup_tail = response.partition("Setup:")
        return {
            "frontend": _between(response, "Frontend:", "Backend:"),
            "backend": _between(response, "Backend:", "Database:"),
            "database": _between(response, "Database:", "Setup:"),
            "setup": setup_tail if setup_found else response,
        }
def main():
    """Streamlit entry point: collect project specs, generate, and render code."""
    st.set_page_config(page_title="Local Web Development AI", layout="wide")
    st.title("🚀 Web Development AI Assistant")
    st.write("Generate web applications using local AI - no API key required!")

    # Build the RAG system once per session; model loading is slow.
    if 'rag_system' not in st.session_state:
        with st.spinner("Initializing AI system... (this may take a few minutes on first run)"):
            st.session_state.rag_system = LocalWebDevRAG()

    with st.form("project_specs"):
        description = st.text_area(
            "Project Description",
            placeholder="Describe your web application...",
        )
        left, right = st.columns(2)
        with left:
            frontend = st.selectbox("Frontend", ["React", "Vue", "Angular"])
            database = st.selectbox("Database", ["MongoDB", "PostgreSQL", "MySQL"])
        with right:
            backend = st.selectbox("Backend", ["Node.js", "Python/FastAPI", "Python/Django"])
            features = st.multiselect(
                "Features",
                ["Authentication", "REST API", "File Upload", "Real-time Updates"],
            )
        submitted = st.form_submit_button("Generate Code")

    if submitted:
        try:
            with st.spinner("Generating your application..."):
                result = st.session_state.rag_system.generate_code(
                    description,
                    {"frontend": frontend, "backend": backend, "database": database},
                    features,
                )
            # Render each generated section in its own tab.
            frontend_tab, backend_tab, db_tab, setup_tab = st.tabs(
                ["Frontend", "Backend", "Database", "Setup"]
            )
            with frontend_tab:
                st.code(result["frontend"])
            with backend_tab:
                st.code(result["backend"])
            with db_tab:
                st.code(result["database"])
            with setup_tab:
                st.markdown(result["setup"])
        except Exception as e:
            st.error(f"An error occurred: {str(e)}")
| if __name__ == "__main__": | |
| main() |