# main.py - FastAPI backend for a small semantic document search engine.
#
# Flow: the in-memory documents below are split into 3-sentence chunks and
# embedded ONCE at startup with a SentenceTransformer; each /search request
# embeds the query and ranks chunks by cosine similarity.

from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles  # NOTE(review): currently unused
from fastapi.responses import HTMLResponse
from pydantic import BaseModel
import nltk
from nltk.tokenize import sent_tokenize
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np

# Download required NLTK tokenizer data.
# quiet=True on both calls: the original passed it only for 'punkt', so
# 'punkt_tab' spewed progress output on every startup.
nltk.download('punkt', quiet=True)
nltk.download('punkt_tab', quiet=True)

# Initialize FastAPI app
app = FastAPI(title="Simple Search Engine")

# Allow any origin so a separately-served front-end (or dev server) can
# call the API. Fine for a demo; tighten allow_origins for production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# In-memory document "database": doc_id -> raw text.
documents = {
    "doc1": """
    A new AI analytics tool has been released by TechCorp.
    This tool uses advanced machine learning algorithms to process large datasets.
    It can provide real-time insights and predictive analytics for businesses.
    The tool integrates seamlessly with existing data infrastructure.
    Companies can now make data-driven decisions faster than ever before.
    The AI engine continuously learns from new data to improve accuracy.
    """,
    "doc2": """
    The quarterly finance report shows strong revenue growth.
    Operating expenses have decreased by 15% compared to last quarter.
    Net profit margins have improved significantly across all divisions.
    The company's cash flow remains healthy with substantial reserves.
    Investment in new projects is expected to yield returns next year.
    Shareholders can expect increased dividends this quarter.
    """,
    "doc3": """
    Cloud infrastructure services from AWS and Azure are becoming essential.
    Companies are migrating their legacy systems to the cloud for better scalability.
    AWS offers a wide range of compute and storage options.
    Azure provides excellent integration with Microsoft enterprise products.
    Both platforms support hybrid cloud deployments for flexibility.
    Security and compliance features are continuously being enhanced.
    """,
    "doc4": """
    Our new marketing campaign focuses on SEO optimization strategies.
    We are targeting high-value keywords to increase organic traffic.
    Social media engagement has improved by 40% this month.
    Content marketing efforts are driving more qualified leads.
    The campaign includes email marketing and paid search ads.
    We expect to see ROI improvements within the next quarter.
    """,
    "doc5": """
    The AI tool leverages machine learning for predictive maintenance.
    Machine learning models can detect patterns in equipment behavior.
    This AI-powered solution reduces downtime and operational costs.
    Deep learning techniques are applied to analyze sensor data.
    The system continuously learns and adapts to new scenarios.
    AI and machine learning are transforming industrial operations.
    """,
}


def chunk_documents(documents, sentences_per_chunk=3):
    """Split each document into chunks of consecutive sentences.

    Args:
        documents: Mapping of doc_id -> raw text.
        sentences_per_chunk: How many sentences per chunk; the final chunk
            of a document may be shorter.

    Returns:
        A pair ``(chunks, chunk_metadata)`` of parallel lists: ``chunks[i]``
        is the chunk text, ``chunk_metadata[i]`` is a dict with its
        ``doc_id``, ``chunk_index`` (0-based within the document) and
        ``text``.
    """
    chunks = []
    chunk_metadata = []
    for doc_id, text in documents.items():
        sentences = sent_tokenize(text.strip())
        for i in range(0, len(sentences), sentences_per_chunk):
            chunk = ' '.join(sentences[i:i + sentences_per_chunk])
            chunks.append(chunk)
            chunk_metadata.append({
                'doc_id': doc_id,
                'chunk_index': i // sentences_per_chunk,
                'text': chunk,
            })
    return chunks, chunk_metadata


# Initialize the embedding model and index all documents at import time so
# the first request doesn't pay the encoding cost.
print("Initializing search engine...")
model = SentenceTransformer('all-MiniLM-L6-v2')
chunks, chunk_metadata = chunk_documents(documents)
chunk_embeddings = model.encode(chunks)
print(f"Search engine ready! {len(chunks)} chunks indexed.")


# Pydantic request/response models
class SearchQuery(BaseModel):
    # Free-text search query.
    query: str


class SearchResult(BaseModel):
    # 1-based rank within the returned result list.
    rank: int
    doc_id: str
    similarity_score: float
    text: str


# API Endpoints
@app.get("/")
async def read_root():
    """Serve the single-page search UI.

    NOTE(review): the original inline HTML was garbled in this file; it has
    been reconstructed as a minimal page that preserves all of the original
    visible strings and posts to /search.
    """
    html_content = """<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Simple Search Engine</title>
</head>
<body>
    <h1>🔍 SimpleSearch</h1>
    <p>Your intelligent document search engine</p>
    <form id="search-form">
        <input type="text" id="query" placeholder="Enter your search query..." autofocus>
        <button type="submit">Search</button>
    </form>
    <div id="loading" style="display:none">🔄 Searching...</div>
    <div id="results"></div>
    <script>
        const form = document.getElementById('search-form');
        const loading = document.getElementById('loading');
        const resultsDiv = document.getElementById('results');
        form.addEventListener('submit', async (e) => {
            e.preventDefault();
            const query = document.getElementById('query').value;
            loading.style.display = 'block';
            resultsDiv.innerHTML = '';
            try {
                const resp = await fetch('/search', {
                    method: 'POST',
                    headers: {'Content-Type': 'application/json'},
                    body: JSON.stringify({query})
                });
                const results = await resp.json();
                if (!Array.isArray(results) || results.length === 0) {
                    resultsDiv.textContent = 'No results found. Try a different query!';
                } else {
                    for (const r of results) {
                        const div = document.createElement('div');
                        div.innerHTML = '<b>#' + r.rank + ' [' + r.doc_id + '] ' +
                            '(score: ' + r.similarity_score.toFixed(3) + ')</b><p></p>';
                        div.querySelector('p').textContent = r.text;
                        resultsDiv.appendChild(div);
                    }
                }
            } finally {
                loading.style.display = 'none';
            }
        });
    </script>
</body>
</html>
"""
    return HTMLResponse(content=html_content)


@app.post("/search", response_model=list[SearchResult])
async def search(search_query: SearchQuery):
    """Return the 5 chunks most similar to the query, best first.

    Raises:
        HTTPException 400: if the query is empty/whitespace.
        HTTPException 500: if encoding or scoring fails unexpectedly.
    """
    if not search_query.query.strip():
        raise HTTPException(status_code=400, detail="Query cannot be empty")

    try:
        # Embed the query and score it against every pre-computed chunk.
        query_embedding = model.encode([search_query.query])
        similarities = cosine_similarity(query_embedding, chunk_embeddings)[0]

        # Indices of the 5 highest-scoring chunks, best first — avoids
        # materializing and sorting a dict per chunk as the original did.
        top_indices = np.argsort(similarities)[::-1][:5]

        return [
            SearchResult(
                rank=rank,
                doc_id=chunk_metadata[idx]['doc_id'],
                similarity_score=float(similarities[idx]),
                text=chunk_metadata[idx]['text'],
            )
            for rank, idx in enumerate(top_indices, 1)
        ]
    except HTTPException:
        # Don't let the broad handler below re-wrap deliberate HTTP errors.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Search error: {str(e)}")


@app.get("/health")
async def health_check():
    """Health check endpoint reporting index size."""
    return {"status": "healthy", "total_chunks": len(chunks)}


if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)